// SPDX-License-Identifier: GPL-2.0-only
/*
 *  Copyright (C) 2009  Red Hat, Inc.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/coredump.h>
#include <linux/sched/numa_balancing.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/mmu_notifier.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/shrinker.h>
#include <linux/mm_inline.h>
#include <linux/swapops.h>
#include <linux/backing-dev.h>
#include <linux/dax.h>
#include <linux/khugepaged.h>
#include <linux/freezer.h>
#include <linux/pfn_t.h>
#include <linux/mman.h>
#include <linux/memremap.h>
#include <linux/pagemap.h>
#include <linux/debugfs.h>
#include <linux/migrate.h>
#include <linux/hashtable.h>
#include <linux/userfaultfd_k.h>
#include <linux/page_idle.h>
#include <linux/shmem_fs.h>
#include <linux/oom.h>
#include <linux/numa.h>
#include <linux/page_owner.h>
#include <linux/sched/sysctl.h>
#include <linux/memory-tiers.h>

#include <asm/tlb.h>
#include <asm/pgalloc.h>
#include "internal.h"
#include "swap.h"

#define CREATE_TRACE_POINTS
#include <trace/events/thp.h>

/*
 * By default, transparent hugepage support is disabled in order to avoid
 * risking an increased memory footprint for applications that are not
 * guaranteed to benefit from it. When transparent hugepage support is
 * enabled, it is for all mappings, and khugepaged scans all mappings.
 * Defrag is invoked by khugepaged hugepage allocations and by page faults
 * for all hugepage allocations.
 */
unsigned long transparent_hugepage_flags __read_mostly =
#ifdef CONFIG_TRANSPARENT_HUGEPAGE_ALWAYS
        (1<<TRANSPARENT_HUGEPAGE_FLAG)|
#endif
#ifdef CONFIG_TRANSPARENT_HUGEPAGE_MADVISE
        (1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG)|
#endif
        (1<<TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG)|
        (1<<TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG)|
        (1<<TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG);
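
/*
 * Illustrative note: these compile-time defaults correspond to the sysfs
 * knobs handled further down in this file. For example,
 *
 *	echo always > /sys/kernel/mm/transparent_hugepage/enabled
 *
 * sets TRANSPARENT_HUGEPAGE_FLAG via enabled_store(), and
 *
 *	echo defer > /sys/kernel/mm/transparent_hugepage/defrag
 *
 * selects TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG via defrag_store().
 */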

static struct shrinker deferred_split_shrinker;

static atomic_t huge_zero_refcount;
struct page *huge_zero_page __read_mostly;
unsigned long huge_zero_pfn __read_mostly = ~0UL;

bool hugepage_vma_check(struct vm_area_struct *vma, unsigned long vm_flags,
                        bool smaps, bool in_pf, bool enforce_sysfs)
{
        if (!vma->vm_mm)                /* vdso */
                return false;

        /*
         * Explicitly disabled through madvise or prctl, or some
         * architectures may disable THP for some mappings, for
         * example, s390 kvm.
         */
        if ((vm_flags & VM_NOHUGEPAGE) ||
            test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags))
                return false;
        /*
         * If the hardware/firmware has marked hugepage support as disabled.
         */
        if (transparent_hugepage_flags & (1 << TRANSPARENT_HUGEPAGE_UNSUPPORTED))
                return false;

        /* khugepaged doesn't collapse DAX vma, but page fault is fine. */
        if (vma_is_dax(vma))
                return in_pf;

        /*
         * Special VMA and hugetlb VMA.
         * Must be checked after dax since some dax mappings may have
         * VM_MIXEDMAP set.
         */
        if (vm_flags & VM_NO_KHUGEPAGED)
                return false;

        /*
         * Check alignment for file vma and size for both file and anon vma.
         *
         * Skip the check for page fault. Huge fault does the check in fault
         * handlers. And this check is not suitable for huge PUD fault.
         */
        if (!in_pf &&
            !transhuge_vma_suitable(vma, (vma->vm_end - HPAGE_PMD_SIZE)))
                return false;

        /*
         * Enabled via shmem mount options or sysfs settings.
         * Must be done before hugepage flags check since shmem has its
         * own flags.
         */
        if (!in_pf && shmem_file(vma->vm_file))
                return shmem_is_huge(file_inode(vma->vm_file), vma->vm_pgoff,
                                     !enforce_sysfs, vma->vm_mm, vm_flags);

        /* Enforce sysfs THP requirements as necessary */
        if (enforce_sysfs &&
            (!hugepage_flags_enabled() || (!(vm_flags & VM_HUGEPAGE) &&
                                           !hugepage_flags_always())))
                return false;

        /* Only regular files are valid */
        if (!in_pf && file_thp_enabled(vma))
                return true;

        if (!vma_is_anonymous(vma))
                return false;

        if (vma_is_temporary_stack(vma))
                return false;

        /*
         * THPeligible bit of smaps should show 1 for proper VMAs even
         * though anon_vma is not initialized yet.
         *
         * Allow page fault since anon_vma may not be initialized until
         * the first page fault.
         */
        if (!vma->anon_vma)
                return (smaps || in_pf);

        return true;
}

static bool get_huge_zero_page(void)
{
        struct page *zero_page;
retry:
        if (likely(atomic_inc_not_zero(&huge_zero_refcount)))
                return true;

        zero_page = alloc_pages((GFP_TRANSHUGE | __GFP_ZERO) & ~__GFP_MOVABLE,
                        HPAGE_PMD_ORDER);
        if (!zero_page) {
                count_vm_event(THP_ZERO_PAGE_ALLOC_FAILED);
                return false;
        }
        preempt_disable();
        if (cmpxchg(&huge_zero_page, NULL, zero_page)) {
                preempt_enable();
                __free_pages(zero_page, compound_order(zero_page));
                goto retry;
        }
        WRITE_ONCE(huge_zero_pfn, page_to_pfn(zero_page));

        /* We take an additional reference here. It will be put back by the shrinker. */
        atomic_set(&huge_zero_refcount, 2);
        preempt_enable();
        count_vm_event(THP_ZERO_PAGE_ALLOC);
        return true;
}

static void put_huge_zero_page(void)
{
        /*
         * The counter should never go to zero here. Only the shrinker
         * can put the last reference.
         */
        BUG_ON(atomic_dec_and_test(&huge_zero_refcount));
}

struct page *mm_get_huge_zero_page(struct mm_struct *mm)
{
        if (test_bit(MMF_HUGE_ZERO_PAGE, &mm->flags))
                return READ_ONCE(huge_zero_page);

        if (!get_huge_zero_page())
                return NULL;

        if (test_and_set_bit(MMF_HUGE_ZERO_PAGE, &mm->flags))
                put_huge_zero_page();

        return READ_ONCE(huge_zero_page);
}

void mm_put_huge_zero_page(struct mm_struct *mm)
{
        if (test_bit(MMF_HUGE_ZERO_PAGE, &mm->flags))
                put_huge_zero_page();
}

static unsigned long shrink_huge_zero_page_count(struct shrinker *shrink,
                                        struct shrink_control *sc)
{
        /* We can free the zero page only if the last reference remains. */
        return atomic_read(&huge_zero_refcount) == 1 ? HPAGE_PMD_NR : 0;
}

static unsigned long shrink_huge_zero_page_scan(struct shrinker *shrink,
                                       struct shrink_control *sc)
{
        if (atomic_cmpxchg(&huge_zero_refcount, 1, 0) == 1) {
                struct page *zero_page = xchg(&huge_zero_page, NULL);
                BUG_ON(zero_page == NULL);
                WRITE_ONCE(huge_zero_pfn, ~0UL);
                __free_pages(zero_page, compound_order(zero_page));
                return HPAGE_PMD_NR;
        }

        return 0;
}

static struct shrinker huge_zero_page_shrinker = {
        .count_objects = shrink_huge_zero_page_count,
        .scan_objects = shrink_huge_zero_page_scan,
        .seeks = DEFAULT_SEEKS,
};
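
/*
 * Huge zero page lifecycle, as implemented above: the first successful
 * get_huge_zero_page() allocates the page and sets the refcount to 2 -
 * one reference for the caller and one extra that only the shrinker
 * drops. Later users take references with atomic_inc_not_zero(). Once
 * every mm has put its reference, the count sits at 1, at which point
 * shrink_huge_zero_page_count() reports HPAGE_PMD_NR reclaimable pages
 * and shrink_huge_zero_page_scan() cmpxchg()es the count from 1 to 0
 * and frees the page.
 */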

#ifdef CONFIG_SYSFS
static ssize_t enabled_show(struct kobject *kobj,
                            struct kobj_attribute *attr, char *buf)
{
        const char *output;

        if (test_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags))
                output = "[always] madvise never";
        else if (test_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
                          &transparent_hugepage_flags))
                output = "always [madvise] never";
        else
                output = "always madvise [never]";

        return sysfs_emit(buf, "%s\n", output);
}

static ssize_t enabled_store(struct kobject *kobj,
                             struct kobj_attribute *attr,
                             const char *buf, size_t count)
{
        ssize_t ret = count;

        if (sysfs_streq(buf, "always")) {
                clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, &transparent_hugepage_flags);
                set_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags);
        } else if (sysfs_streq(buf, "madvise")) {
                clear_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags);
                set_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, &transparent_hugepage_flags);
        } else if (sysfs_streq(buf, "never")) {
                clear_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags);
                clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, &transparent_hugepage_flags);
        } else
                ret = -EINVAL;

        if (ret > 0) {
                int err = start_stop_khugepaged();
                if (err)
                        ret = err;
        }
        return ret;
}

static struct kobj_attribute enabled_attr = __ATTR_RW(enabled);

ssize_t single_hugepage_flag_show(struct kobject *kobj,
                                  struct kobj_attribute *attr, char *buf,
                                  enum transparent_hugepage_flag flag)
{
        return sysfs_emit(buf, "%d\n",
                          !!test_bit(flag, &transparent_hugepage_flags));
}

ssize_t single_hugepage_flag_store(struct kobject *kobj,
                                 struct kobj_attribute *attr,
                                 const char *buf, size_t count,
                                 enum transparent_hugepage_flag flag)
{
        unsigned long value;
        int ret;

        ret = kstrtoul(buf, 10, &value);
        if (ret < 0)
                return ret;
        if (value > 1)
                return -EINVAL;

        if (value)
                set_bit(flag, &transparent_hugepage_flags);
        else
                clear_bit(flag, &transparent_hugepage_flags);

        return count;
}

static ssize_t defrag_show(struct kobject *kobj,
                           struct kobj_attribute *attr, char *buf)
{
        const char *output;

        if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG,
                     &transparent_hugepage_flags))
                output = "[always] defer defer+madvise madvise never";
        else if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG,
                          &transparent_hugepage_flags))
                output = "always [defer] defer+madvise madvise never";
        else if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG,
                          &transparent_hugepage_flags))
                output = "always defer [defer+madvise] madvise never";
        else if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG,
                          &transparent_hugepage_flags))
                output = "always defer defer+madvise [madvise] never";
        else
                output = "always defer defer+madvise madvise [never]";

        return sysfs_emit(buf, "%s\n", output);
}

static ssize_t defrag_store(struct kobject *kobj,
                            struct kobj_attribute *attr,
                            const char *buf, size_t count)
{
        if (sysfs_streq(buf, "always")) {
                clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
                clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
                clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
                set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
        } else if (sysfs_streq(buf, "defer+madvise")) {
                clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
                clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
                clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
                set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
        } else if (sysfs_streq(buf, "defer")) {
                clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
                clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
                clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
                set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
        } else if (sysfs_streq(buf, "madvise")) {
                clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
                clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
                clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
                set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
        } else if (sysfs_streq(buf, "never")) {
                clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
                clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
                clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
                clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
        } else
                return -EINVAL;

        return count;
}
static struct kobj_attribute defrag_attr = __ATTR_RW(defrag);

static ssize_t use_zero_page_show(struct kobject *kobj,
                                  struct kobj_attribute *attr, char *buf)
{
        return single_hugepage_flag_show(kobj, attr, buf,
                                         TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG);
}
static ssize_t use_zero_page_store(struct kobject *kobj,
                struct kobj_attribute *attr, const char *buf, size_t count)
{
        return single_hugepage_flag_store(kobj, attr, buf, count,
                                 TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG);
}
static struct kobj_attribute use_zero_page_attr = __ATTR_RW(use_zero_page);

static ssize_t hpage_pmd_size_show(struct kobject *kobj,
                                   struct kobj_attribute *attr, char *buf)
{
        return sysfs_emit(buf, "%lu\n", HPAGE_PMD_SIZE);
}
static struct kobj_attribute hpage_pmd_size_attr =
        __ATTR_RO(hpage_pmd_size);

static struct attribute *hugepage_attr[] = {
        &enabled_attr.attr,
        &defrag_attr.attr,
        &use_zero_page_attr.attr,
        &hpage_pmd_size_attr.attr,
#ifdef CONFIG_SHMEM
        &shmem_enabled_attr.attr,
#endif
        NULL,
};

static const struct attribute_group hugepage_attr_group = {
        .attrs = hugepage_attr,
};

static int __init hugepage_init_sysfs(struct kobject **hugepage_kobj)
{
        int err;

        *hugepage_kobj = kobject_create_and_add("transparent_hugepage", mm_kobj);
        if (unlikely(!*hugepage_kobj)) {
                pr_err("failed to create transparent hugepage kobject\n");
                return -ENOMEM;
        }

        err = sysfs_create_group(*hugepage_kobj, &hugepage_attr_group);
        if (err) {
                pr_err("failed to register transparent hugepage group\n");
                goto delete_obj;
        }

        err = sysfs_create_group(*hugepage_kobj, &khugepaged_attr_group);
        if (err) {
                pr_err("failed to register transparent hugepage group\n");
                goto remove_hp_group;
        }

        return 0;

remove_hp_group:
        sysfs_remove_group(*hugepage_kobj, &hugepage_attr_group);
delete_obj:
        kobject_put(*hugepage_kobj);
        return err;
}

static void __init hugepage_exit_sysfs(struct kobject *hugepage_kobj)
{
        sysfs_remove_group(hugepage_kobj, &khugepaged_attr_group);
        sysfs_remove_group(hugepage_kobj, &hugepage_attr_group);
        kobject_put(hugepage_kobj);
}
#else
static inline int hugepage_init_sysfs(struct kobject **hugepage_kobj)
{
        return 0;
}

static inline void hugepage_exit_sysfs(struct kobject *hugepage_kobj)
{
}
#endif /* CONFIG_SYSFS */

static int __init hugepage_init(void)
{
        int err;
        struct kobject *hugepage_kobj;

        if (!has_transparent_hugepage()) {
                transparent_hugepage_flags = 1 << TRANSPARENT_HUGEPAGE_UNSUPPORTED;
                return -EINVAL;
        }

        /*
         * hugepages can't be allocated by the buddy allocator
         */
        MAYBE_BUILD_BUG_ON(HPAGE_PMD_ORDER > MAX_ORDER);
        /*
         * we use page->mapping and page->index in second tail page
         * as list_head: assuming THP order >= 2
         */
        MAYBE_BUILD_BUG_ON(HPAGE_PMD_ORDER < 2);

        err = hugepage_init_sysfs(&hugepage_kobj);
        if (err)
                goto err_sysfs;

        err = khugepaged_init();
        if (err)
                goto err_slab;

        err = register_shrinker(&huge_zero_page_shrinker, "thp-zero");
        if (err)
                goto err_hzp_shrinker;
        err = register_shrinker(&deferred_split_shrinker, "thp-deferred_split");
        if (err)
                goto err_split_shrinker;

        /*
         * By default disable transparent hugepages on smaller systems,
         * where the extra memory used could hurt more than TLB overhead
         * is likely to save.  The admin can still enable it through /sys.
         */
        if (totalram_pages() < (512 << (20 - PAGE_SHIFT))) {
                transparent_hugepage_flags = 0;
                return 0;
        }

        err = start_stop_khugepaged();
        if (err)
                goto err_khugepaged;

        return 0;
err_khugepaged:
        unregister_shrinker(&deferred_split_shrinker);
err_split_shrinker:
        unregister_shrinker(&huge_zero_page_shrinker);
err_hzp_shrinker:
        khugepaged_destroy();
err_slab:
        hugepage_exit_sysfs(hugepage_kobj);
err_sysfs:
        return err;
}
subsys_initcall(hugepage_init);

static int __init setup_transparent_hugepage(char *str)
{
        int ret = 0;
        if (!str)
                goto out;
        if (!strcmp(str, "always")) {
                set_bit(TRANSPARENT_HUGEPAGE_FLAG,
                        &transparent_hugepage_flags);
                clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
                          &transparent_hugepage_flags);
                ret = 1;
        } else if (!strcmp(str, "madvise")) {
                clear_bit(TRANSPARENT_HUGEPAGE_FLAG,
                          &transparent_hugepage_flags);
                set_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
                        &transparent_hugepage_flags);
                ret = 1;
        } else if (!strcmp(str, "never")) {
                clear_bit(TRANSPARENT_HUGEPAGE_FLAG,
                          &transparent_hugepage_flags);
                clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
                          &transparent_hugepage_flags);
                ret = 1;
        }
out:
        if (!ret)
                pr_warn("transparent_hugepage= cannot parse, ignored\n");
        return ret;
}
__setup("transparent_hugepage=", setup_transparent_hugepage);
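
/*
 * Boot-time usage of the parameter above, for example on the kernel
 * command line:
 *
 *	transparent_hugepage=madvise
 *
 * which clears TRANSPARENT_HUGEPAGE_FLAG and sets
 * TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG before init runs, mirroring a
 * later "echo madvise" into the sysfs 'enabled' file.
 */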

pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma)
{
        if (likely(vma->vm_flags & VM_WRITE))
                pmd = pmd_mkwrite(pmd);
        return pmd;
}

#ifdef CONFIG_MEMCG
static inline
struct deferred_split *get_deferred_split_queue(struct folio *folio)
{
        struct mem_cgroup *memcg = folio_memcg(folio);
        struct pglist_data *pgdat = NODE_DATA(folio_nid(folio));

        if (memcg)
                return &memcg->deferred_split_queue;
        else
                return &pgdat->deferred_split_queue;
}
#else
static inline
struct deferred_split *get_deferred_split_queue(struct folio *folio)
{
        struct pglist_data *pgdat = NODE_DATA(folio_nid(folio));

        return &pgdat->deferred_split_queue;
}
#endif

void prep_transhuge_page(struct page *page)
{
        struct folio *folio = (struct folio *)page;

        VM_BUG_ON_FOLIO(folio_order(folio) < 2, folio);
        INIT_LIST_HEAD(&folio->_deferred_list);
        set_compound_page_dtor(page, TRANSHUGE_PAGE_DTOR);
}

static inline bool is_transparent_hugepage(struct page *page)
{
        struct folio *folio;

        if (!PageCompound(page))
                return false;

        folio = page_folio(page);
        return is_huge_zero_page(&folio->page) ||
               folio->_folio_dtor == TRANSHUGE_PAGE_DTOR;
}

static unsigned long __thp_get_unmapped_area(struct file *filp,
                unsigned long addr, unsigned long len,
                loff_t off, unsigned long flags, unsigned long size)
{
        loff_t off_end = off + len;
        loff_t off_align = round_up(off, size);
        unsigned long len_pad, ret;

        if (off_end <= off_align || (off_end - off_align) < size)
                return 0;

        len_pad = len + size;
        if (len_pad < len || (off + len_pad) < off)
                return 0;

        ret = current->mm->get_unmapped_area(filp, addr, len_pad,
                                              off >> PAGE_SHIFT, flags);

        /*
         * The failure might be due to length padding. The caller will retry
         * without the padding.
         */
        if (IS_ERR_VALUE(ret))
                return 0;

        /*
         * Do not try to align to THP boundary if allocation at the address
         * hint succeeds.
         */
        if (ret == addr)
                return addr;

        ret += (off - ret) & (size - 1);
        return ret;
}
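
/*
 * Worked example for the alignment arithmetic above (illustrative
 * numbers, not from any real workload): with size = PMD_SIZE = 2MB,
 * off = 0x1ff000 and a padded search that returned
 * ret = 0x7f1234600000, the final "ret += (off - ret) & (size - 1)"
 * advances ret to 0x7f12347ff000, so that
 * ret % PMD_SIZE == off % PMD_SIZE. File offset and virtual address
 * then share alignment modulo PMD_SIZE, which is what allows the range
 * to be mapped with huge PMD entries.
 */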

unsigned long thp_get_unmapped_area(struct file *filp, unsigned long addr,
                unsigned long len, unsigned long pgoff, unsigned long flags)
{
        unsigned long ret;
        loff_t off = (loff_t)pgoff << PAGE_SHIFT;

        ret = __thp_get_unmapped_area(filp, addr, len, off, flags, PMD_SIZE);
        if (ret)
                return ret;

        return current->mm->get_unmapped_area(filp, addr, len, pgoff, flags);
}
EXPORT_SYMBOL_GPL(thp_get_unmapped_area);

static vm_fault_t __do_huge_pmd_anonymous_page(struct vm_fault *vmf,
                        struct page *page, gfp_t gfp)
{
        struct vm_area_struct *vma = vmf->vma;
        struct folio *folio = page_folio(page);
        pgtable_t pgtable;
        unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
        vm_fault_t ret = 0;

        VM_BUG_ON_FOLIO(!folio_test_large(folio), folio);

        if (mem_cgroup_charge(folio, vma->vm_mm, gfp)) {
                folio_put(folio);
                count_vm_event(THP_FAULT_FALLBACK);
                count_vm_event(THP_FAULT_FALLBACK_CHARGE);
                return VM_FAULT_FALLBACK;
        }
        folio_throttle_swaprate(folio, gfp);

        pgtable = pte_alloc_one(vma->vm_mm);
        if (unlikely(!pgtable)) {
                ret = VM_FAULT_OOM;
                goto release;
        }

        clear_huge_page(page, vmf->address, HPAGE_PMD_NR);
        /*
         * The memory barrier inside __folio_mark_uptodate makes sure that
         * clear_huge_page writes become visible before the set_pmd_at()
         * write.
         */
        __folio_mark_uptodate(folio);

        vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
        if (unlikely(!pmd_none(*vmf->pmd))) {
                goto unlock_release;
        } else {
                pmd_t entry;

                ret = check_stable_address_space(vma->vm_mm);
                if (ret)
                        goto unlock_release;

                /* Deliver the page fault to userland */
                if (userfaultfd_missing(vma)) {
                        spin_unlock(vmf->ptl);
                        folio_put(folio);
                        pte_free(vma->vm_mm, pgtable);
                        ret = handle_userfault(vmf, VM_UFFD_MISSING);
                        VM_BUG_ON(ret & VM_FAULT_FALLBACK);
                        return ret;
                }

                entry = mk_huge_pmd(page, vma->vm_page_prot);
                entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
                folio_add_new_anon_rmap(folio, vma, haddr);
                folio_add_lru_vma(folio, vma);
                pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, pgtable);
                set_pmd_at(vma->vm_mm, haddr, vmf->pmd, entry);
                update_mmu_cache_pmd(vma, vmf->address, vmf->pmd);
                add_mm_counter(vma->vm_mm, MM_ANONPAGES, HPAGE_PMD_NR);
                mm_inc_nr_ptes(vma->vm_mm);
                spin_unlock(vmf->ptl);
                count_vm_event(THP_FAULT_ALLOC);
                count_memcg_event_mm(vma->vm_mm, THP_FAULT_ALLOC);
        }

        return 0;
unlock_release:
        spin_unlock(vmf->ptl);
release:
        if (pgtable)
                pte_free(vma->vm_mm, pgtable);
        folio_put(folio);
        return ret;
}

/*
 * always: directly stall for all thp allocations
 * defer: wake kswapd and fail if not immediately available
 * defer+madvise: wake kswapd and directly stall for MADV_HUGEPAGE, otherwise
 *                fail if not immediately available
 * madvise: directly stall for MADV_HUGEPAGE, otherwise fail if not immediately
 *          available
 * never: never stall for any thp allocation
 */
gfp_t vma_thp_gfp_mask(struct vm_area_struct *vma)
{
        const bool vma_madvised = vma && (vma->vm_flags & VM_HUGEPAGE);

        /* Always do synchronous compaction */
        if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags))
                return GFP_TRANSHUGE | (vma_madvised ? 0 : __GFP_NORETRY);

        /* Kick kcompactd and fail quickly */
        if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags))
                return GFP_TRANSHUGE_LIGHT | __GFP_KSWAPD_RECLAIM;

        /* Synchronous compaction if madvised, otherwise kick kcompactd */
        if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags))
                return GFP_TRANSHUGE_LIGHT |
                        (vma_madvised ? __GFP_DIRECT_RECLAIM :
                                        __GFP_KSWAPD_RECLAIM);

        /* Only do synchronous compaction if madvised */
        if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags))
                return GFP_TRANSHUGE_LIGHT |
                       (vma_madvised ? __GFP_DIRECT_RECLAIM : 0);

        return GFP_TRANSHUGE_LIGHT;
}
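
/*
 * Example of the policy above: with defrag set to "defer+madvise"
 * (TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG), a VMA marked with
 * MADV_HUGEPAGE gets GFP_TRANSHUGE_LIGHT | __GFP_DIRECT_RECLAIM and may
 * stall in compaction, while any other VMA gets
 * GFP_TRANSHUGE_LIGHT | __GFP_KSWAPD_RECLAIM and only wakes
 * kswapd/kcompactd before failing the huge allocation.
 */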

/* Caller must hold page table lock. */
static void set_huge_zero_page(pgtable_t pgtable, struct mm_struct *mm,
                struct vm_area_struct *vma, unsigned long haddr, pmd_t *pmd,
                struct page *zero_page)
{
        pmd_t entry;
        if (!pmd_none(*pmd))
                return;
        entry = mk_pmd(zero_page, vma->vm_page_prot);
        entry = pmd_mkhuge(entry);
        pgtable_trans_huge_deposit(mm, pmd, pgtable);
        set_pmd_at(mm, haddr, pmd, entry);
        mm_inc_nr_ptes(mm);
}

vm_fault_t do_huge_pmd_anonymous_page(struct vm_fault *vmf)
{
        struct vm_area_struct *vma = vmf->vma;
        gfp_t gfp;
        struct folio *folio;
        unsigned long haddr = vmf->address & HPAGE_PMD_MASK;

        if (!transhuge_vma_suitable(vma, haddr))
                return VM_FAULT_FALLBACK;
        if (unlikely(anon_vma_prepare(vma)))
                return VM_FAULT_OOM;
        khugepaged_enter_vma(vma, vma->vm_flags);

        if (!(vmf->flags & FAULT_FLAG_WRITE) &&
                        !mm_forbids_zeropage(vma->vm_mm) &&
                        transparent_hugepage_use_zero_page()) {
                pgtable_t pgtable;
                struct page *zero_page;
                vm_fault_t ret;
                pgtable = pte_alloc_one(vma->vm_mm);
                if (unlikely(!pgtable))
                        return VM_FAULT_OOM;
                zero_page = mm_get_huge_zero_page(vma->vm_mm);
                if (unlikely(!zero_page)) {
                        pte_free(vma->vm_mm, pgtable);
                        count_vm_event(THP_FAULT_FALLBACK);
                        return VM_FAULT_FALLBACK;
                }
                vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
                ret = 0;
                if (pmd_none(*vmf->pmd)) {
                        ret = check_stable_address_space(vma->vm_mm);
                        if (ret) {
                                spin_unlock(vmf->ptl);
                                pte_free(vma->vm_mm, pgtable);
                        } else if (userfaultfd_missing(vma)) {
                                spin_unlock(vmf->ptl);
                                pte_free(vma->vm_mm, pgtable);
                                ret = handle_userfault(vmf, VM_UFFD_MISSING);
                                VM_BUG_ON(ret & VM_FAULT_FALLBACK);
                        } else {
                                set_huge_zero_page(pgtable, vma->vm_mm, vma,
                                                   haddr, vmf->pmd, zero_page);
                                update_mmu_cache_pmd(vma, vmf->address, vmf->pmd);
                                spin_unlock(vmf->ptl);
                        }
                } else {
                        spin_unlock(vmf->ptl);
                        pte_free(vma->vm_mm, pgtable);
                }
                return ret;
        }
        gfp = vma_thp_gfp_mask(vma);
        folio = vma_alloc_folio(gfp, HPAGE_PMD_ORDER, vma, haddr, true);
        if (unlikely(!folio)) {
                count_vm_event(THP_FAULT_FALLBACK);
                return VM_FAULT_FALLBACK;
        }
        return __do_huge_pmd_anonymous_page(vmf, &folio->page, gfp);
}

static void insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
                pmd_t *pmd, pfn_t pfn, pgprot_t prot, bool write,
                pgtable_t pgtable)
{
        struct mm_struct *mm = vma->vm_mm;
        pmd_t entry;
        spinlock_t *ptl;

        ptl = pmd_lock(mm, pmd);
        if (!pmd_none(*pmd)) {
                if (write) {
                        if (pmd_pfn(*pmd) != pfn_t_to_pfn(pfn)) {
                                WARN_ON_ONCE(!is_huge_zero_pmd(*pmd));
                                goto out_unlock;
                        }
                        entry = pmd_mkyoung(*pmd);
                        entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
                        if (pmdp_set_access_flags(vma, addr, pmd, entry, 1))
                                update_mmu_cache_pmd(vma, addr, pmd);
                }

                goto out_unlock;
        }

        entry = pmd_mkhuge(pfn_t_pmd(pfn, prot));
        if (pfn_t_devmap(pfn))
                entry = pmd_mkdevmap(entry);
        if (write) {
                entry = pmd_mkyoung(pmd_mkdirty(entry));
                entry = maybe_pmd_mkwrite(entry, vma);
        }

        if (pgtable) {
                pgtable_trans_huge_deposit(mm, pmd, pgtable);
                mm_inc_nr_ptes(mm);
                pgtable = NULL;
        }

        set_pmd_at(mm, addr, pmd, entry);
        update_mmu_cache_pmd(vma, addr, pmd);

out_unlock:
        spin_unlock(ptl);
        if (pgtable)
                pte_free(mm, pgtable);
}

/**
 * vmf_insert_pfn_pmd - insert a pmd size pfn
 * @vmf: Structure describing the fault
 * @pfn: pfn to insert
 * @write: whether it's a write fault
 *
 * Insert a pmd size pfn. See vmf_insert_pfn() for additional info.
 *
 * Return: vm_fault_t value.
 */
vm_fault_t vmf_insert_pfn_pmd(struct vm_fault *vmf, pfn_t pfn, bool write)
{
        unsigned long addr = vmf->address & PMD_MASK;
        struct vm_area_struct *vma = vmf->vma;
        pgprot_t pgprot = vma->vm_page_prot;
        pgtable_t pgtable = NULL;

        /*
         * If we had pmd_special, we could avoid all these restrictions,
         * but we need to be consistent with PTEs and architectures that
         * can't support a 'special' bit.
         */
        BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) &&
                        !pfn_t_devmap(pfn));
        BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) ==
                                                (VM_PFNMAP|VM_MIXEDMAP));
        BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags));

        if (addr < vma->vm_start || addr >= vma->vm_end)
                return VM_FAULT_SIGBUS;

        if (arch_needs_pgtable_deposit()) {
                pgtable = pte_alloc_one(vma->vm_mm);
                if (!pgtable)
                        return VM_FAULT_OOM;
        }

        track_pfn_insert(vma, &pgprot, pfn);

        insert_pfn_pmd(vma, addr, vmf->pmd, pfn, pgprot, write, pgtable);
        return VM_FAULT_NOPAGE;
}
EXPORT_SYMBOL_GPL(vmf_insert_pfn_pmd);
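
/*
 * Sketch of a typical caller (hypothetical driver code, not from this
 * file): a ->huge_fault() handler that has resolved vmf->address to a
 * device pfn would finish with
 *
 *	return vmf_insert_pfn_pmd(vmf, pfn,
 *				  vmf->flags & FAULT_FLAG_WRITE);
 *
 * DAX filesystems use the function this way for PMD-sized faults.
 */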

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
static pud_t maybe_pud_mkwrite(pud_t pud, struct vm_area_struct *vma)
{
        if (likely(vma->vm_flags & VM_WRITE))
                pud = pud_mkwrite(pud);
        return pud;
}

static void insert_pfn_pud(struct vm_area_struct *vma, unsigned long addr,
                pud_t *pud, pfn_t pfn, bool write)
{
        struct mm_struct *mm = vma->vm_mm;
        pgprot_t prot = vma->vm_page_prot;
        pud_t entry;
        spinlock_t *ptl;

        ptl = pud_lock(mm, pud);
        if (!pud_none(*pud)) {
                if (write) {
                        if (pud_pfn(*pud) != pfn_t_to_pfn(pfn)) {
                                WARN_ON_ONCE(!is_huge_zero_pud(*pud));
                                goto out_unlock;
                        }
                        entry = pud_mkyoung(*pud);
                        entry = maybe_pud_mkwrite(pud_mkdirty(entry), vma);
                        if (pudp_set_access_flags(vma, addr, pud, entry, 1))
                                update_mmu_cache_pud(vma, addr, pud);
                }
                goto out_unlock;
        }

        entry = pud_mkhuge(pfn_t_pud(pfn, prot));
        if (pfn_t_devmap(pfn))
                entry = pud_mkdevmap(entry);
        if (write) {
                entry = pud_mkyoung(pud_mkdirty(entry));
                entry = maybe_pud_mkwrite(entry, vma);
        }
        set_pud_at(mm, addr, pud, entry);
        update_mmu_cache_pud(vma, addr, pud);

out_unlock:
        spin_unlock(ptl);
}

/**
 * vmf_insert_pfn_pud - insert a pud size pfn
 * @vmf: Structure describing the fault
 * @pfn: pfn to insert
 * @write: whether it's a write fault
 *
 * Insert a pud size pfn. See vmf_insert_pfn() for additional info.
 *
 * Return: vm_fault_t value.
 */
vm_fault_t vmf_insert_pfn_pud(struct vm_fault *vmf, pfn_t pfn, bool write)
{
        unsigned long addr = vmf->address & PUD_MASK;
        struct vm_area_struct *vma = vmf->vma;
        pgprot_t pgprot = vma->vm_page_prot;

        /*
         * If we had pud_special, we could avoid all these restrictions,
         * but we need to be consistent with PTEs and architectures that
         * can't support a 'special' bit.
         */
        BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) &&
                        !pfn_t_devmap(pfn));
        BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) ==
                                                (VM_PFNMAP|VM_MIXEDMAP));
        BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags));

        if (addr < vma->vm_start || addr >= vma->vm_end)
                return VM_FAULT_SIGBUS;

        track_pfn_insert(vma, &pgprot, pfn);

        insert_pfn_pud(vma, addr, vmf->pud, pfn, write);
        return VM_FAULT_NOPAGE;
}
EXPORT_SYMBOL_GPL(vmf_insert_pfn_pud);
#endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */

static void touch_pmd(struct vm_area_struct *vma, unsigned long addr,
                      pmd_t *pmd, bool write)
{
        pmd_t _pmd;

        _pmd = pmd_mkyoung(*pmd);
        if (write)
                _pmd = pmd_mkdirty(_pmd);
        if (pmdp_set_access_flags(vma, addr & HPAGE_PMD_MASK,
                                  pmd, _pmd, write))
                update_mmu_cache_pmd(vma, addr, pmd);
}

struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr,
                pmd_t *pmd, int flags, struct dev_pagemap **pgmap)
{
        unsigned long pfn = pmd_pfn(*pmd);
        struct mm_struct *mm = vma->vm_mm;
        struct page *page;
        int ret;

        assert_spin_locked(pmd_lockptr(mm, pmd));

        if (flags & FOLL_WRITE && !pmd_write(*pmd))
                return NULL;

        if (pmd_present(*pmd) && pmd_devmap(*pmd))
                /* pass */;
        else
                return NULL;

        if (flags & FOLL_TOUCH)
                touch_pmd(vma, addr, pmd, flags & FOLL_WRITE);

        /*
         * device mapped pages can only be returned if the
         * caller will manage the page reference count.
         */
        if (!(flags & (FOLL_GET | FOLL_PIN)))
                return ERR_PTR(-EEXIST);

        pfn += (addr & ~PMD_MASK) >> PAGE_SHIFT;
        *pgmap = get_dev_pagemap(pfn, *pgmap);
        if (!*pgmap)
                return ERR_PTR(-EFAULT);
        page = pfn_to_page(pfn);
        ret = try_grab_page(page, flags);
        if (ret)
                page = ERR_PTR(ret);

        return page;
}

int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
                  pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
                  struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma)
{
        spinlock_t *dst_ptl, *src_ptl;
        struct page *src_page;
        pmd_t pmd;
        pgtable_t pgtable = NULL;
        int ret = -ENOMEM;

        /* Skip if it can be refilled on fault */
        if (!vma_is_anonymous(dst_vma))
                return 0;

        pgtable = pte_alloc_one(dst_mm);
        if (unlikely(!pgtable))
                goto out;

        dst_ptl = pmd_lock(dst_mm, dst_pmd);
        src_ptl = pmd_lockptr(src_mm, src_pmd);
        spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);

        ret = -EAGAIN;
        pmd = *src_pmd;

#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
        if (unlikely(is_swap_pmd(pmd))) {
                swp_entry_t entry = pmd_to_swp_entry(pmd);

                VM_BUG_ON(!is_pmd_migration_entry(pmd));
                if (!is_readable_migration_entry(entry)) {
                        entry = make_readable_migration_entry(
                                                        swp_offset(entry));
                        pmd = swp_entry_to_pmd(entry);
                        if (pmd_swp_soft_dirty(*src_pmd))
                                pmd = pmd_swp_mksoft_dirty(pmd);
                        if (pmd_swp_uffd_wp(*src_pmd))
                                pmd = pmd_swp_mkuffd_wp(pmd);
                        set_pmd_at(src_mm, addr, src_pmd, pmd);
                }
                add_mm_counter(dst_mm, MM_ANONPAGES, HPAGE_PMD_NR);
                mm_inc_nr_ptes(dst_mm);
                pgtable_trans_huge_deposit(dst_mm, dst_pmd, pgtable);
                if (!userfaultfd_wp(dst_vma))
                        pmd = pmd_swp_clear_uffd_wp(pmd);
                set_pmd_at(dst_mm, addr, dst_pmd, pmd);
                ret = 0;
                goto out_unlock;
        }
#endif

        if (unlikely(!pmd_trans_huge(pmd))) {
                pte_free(dst_mm, pgtable);
                goto out_unlock;
        }
        /*
         * When the page table lock is held, the huge zero pmd should not be
         * under splitting, since we don't split the page itself, only the
         * pmd into a page table.
         */
        if (is_huge_zero_pmd(pmd)) {
                /*
                 * get_huge_zero_page() will never allocate a new page here,
                 * since we already have a zero page to copy. It just takes a
                 * reference.
                 */
                mm_get_huge_zero_page(dst_mm);
                goto out_zero_page;
        }

        src_page = pmd_page(pmd);
        VM_BUG_ON_PAGE(!PageHead(src_page), src_page);

        get_page(src_page);
        if (unlikely(page_try_dup_anon_rmap(src_page, true, src_vma))) {
                /* Page may be pinned: split and retry the fault on PTEs. */
                put_page(src_page);
                pte_free(dst_mm, pgtable);
                spin_unlock(src_ptl);
                spin_unlock(dst_ptl);
                __split_huge_pmd(src_vma, src_pmd, addr, false, NULL);
                return -EAGAIN;
        }
        add_mm_counter(dst_mm, MM_ANONPAGES, HPAGE_PMD_NR);
out_zero_page:
        mm_inc_nr_ptes(dst_mm);
        pgtable_trans_huge_deposit(dst_mm, dst_pmd, pgtable);
        pmdp_set_wrprotect(src_mm, addr, src_pmd);
        if (!userfaultfd_wp(dst_vma))
                pmd = pmd_clear_uffd_wp(pmd);
        pmd = pmd_mkold(pmd_wrprotect(pmd));
        set_pmd_at(dst_mm, addr, dst_pmd, pmd);

        ret = 0;
out_unlock:
        spin_unlock(src_ptl);
        spin_unlock(dst_ptl);
out:
        return ret;
}
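
/*
 * Note on the failure path above: if page_try_dup_anon_rmap() fails,
 * the source page may be pinned, so copy_huge_pmd() splits the PMD and
 * returns -EAGAIN; the caller in copy_pmd_range() then falls back to
 * copying the range at PTE granularity.
 */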
1167
1168 #ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
1169 static void touch_pud(struct vm_area_struct *vma, unsigned long addr,
1170                       pud_t *pud, bool write)
1171 {
1172         pud_t _pud;
1173
1174         _pud = pud_mkyoung(*pud);
1175         if (write)
1176                 _pud = pud_mkdirty(_pud);
1177         if (pudp_set_access_flags(vma, addr & HPAGE_PUD_MASK,
1178                                   pud, _pud, write))
1179                 update_mmu_cache_pud(vma, addr, pud);
1180 }
1181
1182 struct page *follow_devmap_pud(struct vm_area_struct *vma, unsigned long addr,
1183                 pud_t *pud, int flags, struct dev_pagemap **pgmap)
1184 {
1185         unsigned long pfn = pud_pfn(*pud);
1186         struct mm_struct *mm = vma->vm_mm;
1187         struct page *page;
1188         int ret;
1189
1190         assert_spin_locked(pud_lockptr(mm, pud));
1191
1192         if (flags & FOLL_WRITE && !pud_write(*pud))
1193                 return NULL;
1194
1195         if (pud_present(*pud) && pud_devmap(*pud))
1196                 /* pass */;
1197         else
1198                 return NULL;
1199
1200         if (flags & FOLL_TOUCH)
1201                 touch_pud(vma, addr, pud, flags & FOLL_WRITE);
1202
1203         /*
1204          * device mapped pages can only be returned if the
1205          * caller will manage the page reference count.
1206          *
1207          * At least one of FOLL_GET | FOLL_PIN must be set, so assert that here:
1208          */
1209         if (!(flags & (FOLL_GET | FOLL_PIN)))
1210                 return ERR_PTR(-EEXIST);
1211
1212         pfn += (addr & ~PUD_MASK) >> PAGE_SHIFT;
1213         *pgmap = get_dev_pagemap(pfn, *pgmap);
1214         if (!*pgmap)
1215                 return ERR_PTR(-EFAULT);
1216         page = pfn_to_page(pfn);
1217
1218         ret = try_grab_page(page, flags);
1219         if (ret)
1220                 page = ERR_PTR(ret);
1221
1222         return page;
1223 }
1224
1225 int copy_huge_pud(struct mm_struct *dst_mm, struct mm_struct *src_mm,
1226                   pud_t *dst_pud, pud_t *src_pud, unsigned long addr,
1227                   struct vm_area_struct *vma)
1228 {
1229         spinlock_t *dst_ptl, *src_ptl;
1230         pud_t pud;
1231         int ret;
1232
1233         dst_ptl = pud_lock(dst_mm, dst_pud);
1234         src_ptl = pud_lockptr(src_mm, src_pud);
1235         spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
1236
1237         ret = -EAGAIN;
1238         pud = *src_pud;
1239         if (unlikely(!pud_trans_huge(pud) && !pud_devmap(pud)))
1240                 goto out_unlock;
1241
1242         /*
1243          * When page table lock is held, the huge zero pud should not be
1244          * under splitting since we don't split the page itself, only pud to
1245          * a page table.
1246          */
1247         if (is_huge_zero_pud(pud)) {
1248                 /* No huge zero pud yet */
1249         }
1250
1251         /*
1252          * TODO: once we support anonymous pages, use page_try_dup_anon_rmap()
1253          * and split if duplicating fails.
1254          */
1255         pudp_set_wrprotect(src_mm, addr, src_pud);
1256         pud = pud_mkold(pud_wrprotect(pud));
1257         set_pud_at(dst_mm, addr, dst_pud, pud);
1258
1259         ret = 0;
1260 out_unlock:
1261         spin_unlock(src_ptl);
1262         spin_unlock(dst_ptl);
1263         return ret;
1264 }
1265
1266 void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud)
1267 {
1268         bool write = vmf->flags & FAULT_FLAG_WRITE;
1269
1270         vmf->ptl = pud_lock(vmf->vma->vm_mm, vmf->pud);
1271         if (unlikely(!pud_same(*vmf->pud, orig_pud)))
1272                 goto unlock;
1273
1274         touch_pud(vmf->vma, vmf->address, vmf->pud, write);
1275 unlock:
1276         spin_unlock(vmf->ptl);
1277 }
1278 #endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
1279
1280 void huge_pmd_set_accessed(struct vm_fault *vmf)
1281 {
1282         bool write = vmf->flags & FAULT_FLAG_WRITE;
1283
1284         vmf->ptl = pmd_lock(vmf->vma->vm_mm, vmf->pmd);
1285         if (unlikely(!pmd_same(*vmf->pmd, vmf->orig_pmd)))
1286                 goto unlock;
1287
1288         touch_pmd(vmf->vma, vmf->address, vmf->pmd, write);
1289
1290 unlock:
1291         spin_unlock(vmf->ptl);
1292 }
1293
1294 vm_fault_t do_huge_pmd_wp_page(struct vm_fault *vmf)
1295 {
1296         const bool unshare = vmf->flags & FAULT_FLAG_UNSHARE;
1297         struct vm_area_struct *vma = vmf->vma;
1298         struct folio *folio;
1299         struct page *page;
1300         unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
1301         pmd_t orig_pmd = vmf->orig_pmd;
1302
1303         vmf->ptl = pmd_lockptr(vma->vm_mm, vmf->pmd);
1304         VM_BUG_ON_VMA(!vma->anon_vma, vma);
1305
1306         if (is_huge_zero_pmd(orig_pmd))
1307                 goto fallback;
1308
1309         spin_lock(vmf->ptl);
1310
1311         if (unlikely(!pmd_same(*vmf->pmd, orig_pmd))) {
1312                 spin_unlock(vmf->ptl);
1313                 return 0;
1314         }
1315
1316         page = pmd_page(orig_pmd);
1317         folio = page_folio(page);
1318         VM_BUG_ON_PAGE(!PageHead(page), page);
1319
1320         /* Early check when only holding the PT lock. */
1321         if (PageAnonExclusive(page))
1322                 goto reuse;
1323
1324         if (!folio_trylock(folio)) {
1325                 folio_get(folio);
1326                 spin_unlock(vmf->ptl);
1327                 folio_lock(folio);
1328                 spin_lock(vmf->ptl);
1329                 if (unlikely(!pmd_same(*vmf->pmd, orig_pmd))) {
1330                         spin_unlock(vmf->ptl);
1331                         folio_unlock(folio);
1332                         folio_put(folio);
1333                         return 0;
1334                 }
1335                 folio_put(folio);
1336         }
1337
1338         /* Recheck after temporarily dropping the PT lock. */
1339         if (PageAnonExclusive(page)) {
1340                 folio_unlock(folio);
1341                 goto reuse;
1342         }
1343
1344         /*
1345          * See do_wp_page(): we can only reuse the folio exclusively if
1346          * there are no additional references. Note that we always drain
1347          * the LRU pagevecs immediately after adding a THP.
1348          */
1349         if (folio_ref_count(folio) >
1350                         1 + folio_test_swapcache(folio) * folio_nr_pages(folio))
1351                 goto unlock_fallback;
1352         if (folio_test_swapcache(folio))
1353                 folio_free_swap(folio);
1354         if (folio_ref_count(folio) == 1) {
1355                 pmd_t entry;
1356
1357                 page_move_anon_rmap(page, vma);
1358                 folio_unlock(folio);
1359 reuse:
1360                 if (unlikely(unshare)) {
1361                         spin_unlock(vmf->ptl);
1362                         return 0;
1363                 }
1364                 entry = pmd_mkyoung(orig_pmd);
1365                 entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
1366                 if (pmdp_set_access_flags(vma, haddr, vmf->pmd, entry, 1))
1367                         update_mmu_cache_pmd(vma, vmf->address, vmf->pmd);
1368                 spin_unlock(vmf->ptl);
1369                 return 0;
1370         }
1371
1372 unlock_fallback:
1373         folio_unlock(folio);
1374         spin_unlock(vmf->ptl);
1375 fallback:
1376         __split_huge_pmd(vma, vmf->pmd, vmf->address, false, NULL);
1377         return VM_FAULT_FALLBACK;
1378 }
1379
1380 static inline bool can_change_pmd_writable(struct vm_area_struct *vma,
1381                                            unsigned long addr, pmd_t pmd)
1382 {
1383         struct page *page;
1384
1385         if (WARN_ON_ONCE(!(vma->vm_flags & VM_WRITE)))
1386                 return false;
1387
1388         /* Don't touch entries that are not even readable (NUMA hinting). */
1389         if (pmd_protnone(pmd))
1390                 return false;
1391
1392         /* Do we need write faults for softdirty tracking? */
1393         if (vma_soft_dirty_enabled(vma) && !pmd_soft_dirty(pmd))
1394                 return false;
1395
1396         /* Do we need write faults for uffd-wp tracking? */
1397         if (userfaultfd_huge_pmd_wp(vma, pmd))
1398                 return false;
1399
1400         if (!(vma->vm_flags & VM_SHARED)) {
1401                 /* See can_change_pte_writable(). */
1402                 page = vm_normal_page_pmd(vma, addr, pmd);
1403                 return page && PageAnon(page) && PageAnonExclusive(page);
1404         }
1405
1406         /* See can_change_pte_writable(). */
1407         return pmd_dirty(pmd);
1408 }
1409
1410 /* FOLL_FORCE can write to even unwritable PMDs in COW mappings. */
1411 static inline bool can_follow_write_pmd(pmd_t pmd, struct page *page,
1412                                         struct vm_area_struct *vma,
1413                                         unsigned int flags)
1414 {
1415         /* If the pmd is writable, we can write to the page. */
1416         if (pmd_write(pmd))
1417                 return true;
1418
1419         /* Maybe FOLL_FORCE is set to override it? */
1420         if (!(flags & FOLL_FORCE))
1421                 return false;
1422
1423         /* But FOLL_FORCE has no effect on shared mappings */
1424         if (vma->vm_flags & (VM_MAYSHARE | VM_SHARED))
1425                 return false;
1426
1427         /* ... or read-only private ones */
1428         if (!(vma->vm_flags & VM_MAYWRITE))
1429                 return false;
1430
1431         /* ... or already writable ones that just need to take a write fault */
1432         if (vma->vm_flags & VM_WRITE)
1433                 return false;
1434
1435         /*
1436          * See can_change_pte_writable(): we broke COW and could map the page
1437          * writable if we have an exclusive anonymous page ...
1438          */
1439         if (!page || !PageAnon(page) || !PageAnonExclusive(page))
1440                 return false;
1441
1442         /* ... and a write-fault isn't required for other reasons. */
1443         if (vma_soft_dirty_enabled(vma) && !pmd_soft_dirty(pmd))
1444                 return false;
1445         return !userfaultfd_huge_pmd_wp(vma, pmd);
1446 }
1447
1448 struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
1449                                    unsigned long addr,
1450                                    pmd_t *pmd,
1451                                    unsigned int flags)
1452 {
1453         struct mm_struct *mm = vma->vm_mm;
1454         struct page *page;
1455         int ret;
1456
1457         assert_spin_locked(pmd_lockptr(mm, pmd));
1458
1459         page = pmd_page(*pmd);
1460         VM_BUG_ON_PAGE(!PageHead(page) && !is_zone_device_page(page), page);
1461
1462         if ((flags & FOLL_WRITE) &&
1463             !can_follow_write_pmd(*pmd, page, vma, flags))
1464                 return NULL;
1465
1466         /* Avoid dumping huge zero page */
1467         if ((flags & FOLL_DUMP) && is_huge_zero_pmd(*pmd))
1468                 return ERR_PTR(-EFAULT);
1469
1470         /* Full NUMA hinting faults to serialise migration in fault paths */
1471         if (pmd_protnone(*pmd) && !gup_can_follow_protnone(flags))
1472                 return NULL;
1473
1474         if (!pmd_write(*pmd) && gup_must_unshare(vma, flags, page))
1475                 return ERR_PTR(-EMLINK);
1476
1477         VM_BUG_ON_PAGE((flags & FOLL_PIN) && PageAnon(page) &&
1478                         !PageAnonExclusive(page), page);
1479
1480         ret = try_grab_page(page, flags);
1481         if (ret)
1482                 return ERR_PTR(ret);
1483
1484         if (flags & FOLL_TOUCH)
1485                 touch_pmd(vma, addr, pmd, flags & FOLL_WRITE);
1486
1487         page += (addr & ~HPAGE_PMD_MASK) >> PAGE_SHIFT;
1488         VM_BUG_ON_PAGE(!PageCompound(page) && !is_zone_device_page(page), page);
1489
1490         return page;
1491 }
1492
1493 /* NUMA hinting page fault entry point for trans huge pmds */
1494 vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf)
1495 {
1496         struct vm_area_struct *vma = vmf->vma;
1497         pmd_t oldpmd = vmf->orig_pmd;
1498         pmd_t pmd;
1499         struct page *page;
1500         unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
1501         int page_nid = NUMA_NO_NODE;
1502         int target_nid, last_cpupid = (-1 & LAST_CPUPID_MASK);
1503         bool migrated = false, writable = false;
1504         int flags = 0;
1505
1506         vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
1507         if (unlikely(!pmd_same(oldpmd, *vmf->pmd))) {
1508                 spin_unlock(vmf->ptl);
1509                 goto out;
1510         }
1511
1512         pmd = pmd_modify(oldpmd, vma->vm_page_prot);
1513
1514         /*
1515          * Detect now whether the PMD could be writable; this information
1516          * is only valid while holding the PT lock.
1517          */
1518         writable = pmd_write(pmd);
1519         if (!writable && vma_wants_manual_pte_write_upgrade(vma) &&
1520             can_change_pmd_writable(vma, vmf->address, pmd))
1521                 writable = true;
1522
1523         page = vm_normal_page_pmd(vma, haddr, pmd);
1524         if (!page)
1525                 goto out_map;
1526
1527         /* See similar comment in do_numa_page for explanation */
1528         if (!writable)
1529                 flags |= TNF_NO_GROUP;
1530
1531         page_nid = page_to_nid(page);
1532         /*
1533          * In memory tiering mode, the cpupid of a slow-memory page is used
1534          * to record the page access time, so use the default value.
1535          */
1536         if (node_is_toptier(page_nid))
1537                 last_cpupid = page_cpupid_last(page);
1538         target_nid = numa_migrate_prep(page, vma, haddr, page_nid,
1539                                        &flags);
1540
1541         if (target_nid == NUMA_NO_NODE) {
1542                 put_page(page);
1543                 goto out_map;
1544         }
1545
1546         spin_unlock(vmf->ptl);
1547         writable = false;
1548
1549         migrated = migrate_misplaced_page(page, vma, target_nid);
1550         if (migrated) {
1551                 flags |= TNF_MIGRATED;
1552                 page_nid = target_nid;
1553         } else {
1554                 flags |= TNF_MIGRATE_FAIL;
1555                 vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
1556                 if (unlikely(!pmd_same(oldpmd, *vmf->pmd))) {
1557                         spin_unlock(vmf->ptl);
1558                         goto out;
1559                 }
1560                 goto out_map;
1561         }
1562
1563 out:
1564         if (page_nid != NUMA_NO_NODE)
1565                 task_numa_fault(last_cpupid, page_nid, HPAGE_PMD_NR,
1566                                 flags);
1567
1568         return 0;
1569
1570 out_map:
1571         /* Restore the PMD */
1572         pmd = pmd_modify(oldpmd, vma->vm_page_prot);
1573         pmd = pmd_mkyoung(pmd);
1574         if (writable)
1575                 pmd = pmd_mkwrite(pmd);
1576         set_pmd_at(vma->vm_mm, haddr, vmf->pmd, pmd);
1577         update_mmu_cache_pmd(vma, vmf->address, vmf->pmd);
1578         spin_unlock(vmf->ptl);
1579         goto out;
1580 }
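
/*
 * These hinting faults only occur after NUMA balancing has marked the
 * pmd protnone via change_huge_pmd(..., MM_CP_PROT_NUMA). A userspace
 * sketch for enabling that (assuming CONFIG_NUMA_BALANCING):
 *
 *	int fd = open("/proc/sys/kernel/numa_balancing", O_WRONLY);
 *	write(fd, "1", 1);	// task scanning starts marking pmds protnone
 *	close(fd);		// the next touch of such a pmd lands here
 */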
1581
1582 /*
1583  * Return true if we did MADV_FREE successfully on the entire pmd page;
1584  * otherwise, return false.
1585  */
1586 bool madvise_free_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
1587                 pmd_t *pmd, unsigned long addr, unsigned long next)
1588 {
1589         spinlock_t *ptl;
1590         pmd_t orig_pmd;
1591         struct folio *folio;
1592         struct mm_struct *mm = tlb->mm;
1593         bool ret = false;
1594
1595         tlb_change_page_size(tlb, HPAGE_PMD_SIZE);
1596
1597         ptl = pmd_trans_huge_lock(pmd, vma);
1598         if (!ptl)
1599                 goto out_unlocked;
1600
1601         orig_pmd = *pmd;
1602         if (is_huge_zero_pmd(orig_pmd))
1603                 goto out;
1604
1605         if (unlikely(!pmd_present(orig_pmd))) {
1606                 VM_BUG_ON(thp_migration_supported() &&
1607                                   !is_pmd_migration_entry(orig_pmd));
1608                 goto out;
1609         }
1610
1611         folio = pfn_folio(pmd_pfn(orig_pmd));
1612         /*
1613          * If other processes are mapping this folio, we can't discard it
1614          * unless they all do MADV_FREE, so let's skip the folio.
1615          */
1616         if (folio_mapcount(folio) != 1)
1617                 goto out;
1618
1619         if (!folio_trylock(folio))
1620                 goto out;
1621
1622         /*
1623          * If the user wants to discard only part of the THP's pages, split
1624          * it so MADV_FREE will deactivate just those pages.
1625          */
1626         if (next - addr != HPAGE_PMD_SIZE) {
1627                 folio_get(folio);
1628                 spin_unlock(ptl);
1629                 split_folio(folio);
1630                 folio_unlock(folio);
1631                 folio_put(folio);
1632                 goto out_unlocked;
1633         }
1634
1635         if (folio_test_dirty(folio))
1636                 folio_clear_dirty(folio);
1637         folio_unlock(folio);
1638
1639         if (pmd_young(orig_pmd) || pmd_dirty(orig_pmd)) {
1640                 pmdp_invalidate(vma, addr, pmd);
1641                 orig_pmd = pmd_mkold(orig_pmd);
1642                 orig_pmd = pmd_mkclean(orig_pmd);
1643
1644                 set_pmd_at(mm, addr, pmd, orig_pmd);
1645                 tlb_remove_pmd_tlb_entry(tlb, pmd, addr);
1646         }
1647
1648         folio_mark_lazyfree(folio);
1649         ret = true;
1650 out:
1651         spin_unlock(ptl);
1652 out_unlocked:
1653         return ret;
1654 }
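
/*
 * A userspace sketch of both paths above (assumptions: x86-64 with 2MiB
 * huge pages, THP enabled for the mapping; THP allocation is best-effort):
 *
 *	size_t len = 2UL << 20;
 *	char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	madvise(p, len, MADV_HUGEPAGE);
 *	memset(p, 1, len);		// fault in, ideally as one huge page
 *
 *	madvise(p, len, MADV_FREE);	// whole pmd: lazyfree the folio in place
 *	madvise(p, 4096, MADV_FREE);	// partial range: split_folio() first
 */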
1655
1656 static inline void zap_deposited_table(struct mm_struct *mm, pmd_t *pmd)
1657 {
1658         pgtable_t pgtable;
1659
1660         pgtable = pgtable_trans_huge_withdraw(mm, pmd);
1661         pte_free(mm, pgtable);
1662         mm_dec_nr_ptes(mm);
1663 }
1664
1665 int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
1666                  pmd_t *pmd, unsigned long addr)
1667 {
1668         pmd_t orig_pmd;
1669         spinlock_t *ptl;
1670
1671         tlb_change_page_size(tlb, HPAGE_PMD_SIZE);
1672
1673         ptl = __pmd_trans_huge_lock(pmd, vma);
1674         if (!ptl)
1675                 return 0;
1676         /*
1677          * For architectures like ppc64 we look at the deposited pgtable
1678          * when calling pmdp_huge_get_and_clear(), so do the
1679          * pgtable_trans_huge_withdraw() only after finishing the
1680          * pmdp-related operations.
1681          */
1682         orig_pmd = pmdp_huge_get_and_clear_full(vma, addr, pmd,
1683                                                 tlb->fullmm);
1684         tlb_remove_pmd_tlb_entry(tlb, pmd, addr);
1685         if (vma_is_special_huge(vma)) {
1686                 if (arch_needs_pgtable_deposit())
1687                         zap_deposited_table(tlb->mm, pmd);
1688                 spin_unlock(ptl);
1689         } else if (is_huge_zero_pmd(orig_pmd)) {
1690                 zap_deposited_table(tlb->mm, pmd);
1691                 spin_unlock(ptl);
1692         } else {
1693                 struct page *page = NULL;
1694                 int flush_needed = 1;
1695
1696                 if (pmd_present(orig_pmd)) {
1697                         page = pmd_page(orig_pmd);
1698                         page_remove_rmap(page, vma, true);
1699                         VM_BUG_ON_PAGE(page_mapcount(page) < 0, page);
1700                         VM_BUG_ON_PAGE(!PageHead(page), page);
1701                 } else if (thp_migration_supported()) {
1702                         swp_entry_t entry;
1703
1704                         VM_BUG_ON(!is_pmd_migration_entry(orig_pmd));
1705                         entry = pmd_to_swp_entry(orig_pmd);
1706                         page = pfn_swap_entry_to_page(entry);
1707                         flush_needed = 0;
1708                 } else
1709                         WARN_ONCE(1, "Non present huge pmd without pmd migration enabled!");
1710
1711                 if (PageAnon(page)) {
1712                         zap_deposited_table(tlb->mm, pmd);
1713                         add_mm_counter(tlb->mm, MM_ANONPAGES, -HPAGE_PMD_NR);
1714                 } else {
1715                         if (arch_needs_pgtable_deposit())
1716                                 zap_deposited_table(tlb->mm, pmd);
1717                         add_mm_counter(tlb->mm, mm_counter_file(page), -HPAGE_PMD_NR);
1718                 }
1719
1720                 spin_unlock(ptl);
1721                 if (flush_needed)
1722                         tlb_remove_page_size(tlb, page, HPAGE_PMD_SIZE);
1723         }
1724         return 1;
1725 }
1726
1727 #ifndef pmd_move_must_withdraw
1728 static inline int pmd_move_must_withdraw(spinlock_t *new_pmd_ptl,
1729                                          spinlock_t *old_pmd_ptl,
1730                                          struct vm_area_struct *vma)
1731 {
1732         /*
1733          * With the split pmd lock we also need to move the preallocated
1734          * PTE page table if new_pmd is on a different PMD page table.
1735          *
1736          * We also don't deposit and withdraw tables for file pages.
1737          */
1738         return (new_pmd_ptl != old_pmd_ptl) && vma_is_anonymous(vma);
1739 }
1740 #endif
1741
1742 static pmd_t move_soft_dirty_pmd(pmd_t pmd)
1743 {
1744 #ifdef CONFIG_MEM_SOFT_DIRTY
1745         if (unlikely(is_pmd_migration_entry(pmd)))
1746                 pmd = pmd_swp_mksoft_dirty(pmd);
1747         else if (pmd_present(pmd))
1748                 pmd = pmd_mksoft_dirty(pmd);
1749 #endif
1750         return pmd;
1751 }
1752
1753 bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
1754                   unsigned long new_addr, pmd_t *old_pmd, pmd_t *new_pmd)
1755 {
1756         spinlock_t *old_ptl, *new_ptl;
1757         pmd_t pmd;
1758         struct mm_struct *mm = vma->vm_mm;
1759         bool force_flush = false;
1760
1761         /*
1762          * The destination pmd shouldn't be established; free_pgtables()
1763          * should have released it.
1764          */
1765         if (WARN_ON(!pmd_none(*new_pmd))) {
1766                 VM_BUG_ON(pmd_trans_huge(*new_pmd));
1767                 return false;
1768         }
1769
1770         /*
1771          * We don't have to worry about the ordering of src and dst
1772          * ptlocks because the exclusive mmap_lock prevents deadlock.
1773          */
1774         old_ptl = __pmd_trans_huge_lock(old_pmd, vma);
1775         if (old_ptl) {
1776                 new_ptl = pmd_lockptr(mm, new_pmd);
1777                 if (new_ptl != old_ptl)
1778                         spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);
1779                 pmd = pmdp_huge_get_and_clear(mm, old_addr, old_pmd);
1780                 if (pmd_present(pmd))
1781                         force_flush = true;
1782                 VM_BUG_ON(!pmd_none(*new_pmd));
1783
1784                 if (pmd_move_must_withdraw(new_ptl, old_ptl, vma)) {
1785                         pgtable_t pgtable;
1786                         pgtable = pgtable_trans_huge_withdraw(mm, old_pmd);
1787                         pgtable_trans_huge_deposit(mm, new_pmd, pgtable);
1788                 }
1789                 pmd = move_soft_dirty_pmd(pmd);
1790                 set_pmd_at(mm, new_addr, new_pmd, pmd);
1791                 if (force_flush)
1792                         flush_pmd_tlb_range(vma, old_addr, old_addr + PMD_SIZE);
1793                 if (new_ptl != old_ptl)
1794                         spin_unlock(new_ptl);
1795                 spin_unlock(old_ptl);
1796                 return true;
1797         }
1798         return false;
1799 }
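
/*
 * move_huge_pmd() is exercised by mremap(). A sketch (assuming the source
 * and destination ranges both end up PMD-aligned, in which case whole huge
 * pmds can be moved without splitting):
 *
 *	size_t len = 4UL << 20;
 *	char *old = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	memset(old, 1, len);
 *	char *new = mremap(old, len, len, MREMAP_MAYMOVE);
 */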
1800
1801 /*
1802  * Returns
1803  *  - 0 if PMD could not be locked
1804  *  - 1 if PMD was locked but protections unchanged and TLB flush unnecessary
1805  *      or if prot_numa but THP migration is not supported
1806  *  - HPAGE_PMD_NR if protections changed and TLB flush necessary
1807  */
1808 int change_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
1809                     pmd_t *pmd, unsigned long addr, pgprot_t newprot,
1810                     unsigned long cp_flags)
1811 {
1812         struct mm_struct *mm = vma->vm_mm;
1813         spinlock_t *ptl;
1814         pmd_t oldpmd, entry;
1815         bool prot_numa = cp_flags & MM_CP_PROT_NUMA;
1816         bool uffd_wp = cp_flags & MM_CP_UFFD_WP;
1817         bool uffd_wp_resolve = cp_flags & MM_CP_UFFD_WP_RESOLVE;
1818         int ret = 1;
1819
1820         tlb_change_page_size(tlb, HPAGE_PMD_SIZE);
1821
1822         if (prot_numa && !thp_migration_supported())
1823                 return 1;
1824
1825         ptl = __pmd_trans_huge_lock(pmd, vma);
1826         if (!ptl)
1827                 return 0;
1828
1829 #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
1830         if (is_swap_pmd(*pmd)) {
1831                 swp_entry_t entry = pmd_to_swp_entry(*pmd);
1832                 struct page *page = pfn_swap_entry_to_page(entry);
1833                 pmd_t newpmd;
1834
1835                 VM_BUG_ON(!is_pmd_migration_entry(*pmd));
1836                 if (is_writable_migration_entry(entry)) {
1837                         /*
1838                          * A protection check is difficult, so
1839                          * just be safe and disable write access.
1840                          */
1841                         if (PageAnon(page))
1842                                 entry = make_readable_exclusive_migration_entry(swp_offset(entry));
1843                         else
1844                                 entry = make_readable_migration_entry(swp_offset(entry));
1845                         newpmd = swp_entry_to_pmd(entry);
1846                         if (pmd_swp_soft_dirty(*pmd))
1847                                 newpmd = pmd_swp_mksoft_dirty(newpmd);
1848                 } else {
1849                         newpmd = *pmd;
1850                 }
1851
1852                 if (uffd_wp)
1853                         newpmd = pmd_swp_mkuffd_wp(newpmd);
1854                 else if (uffd_wp_resolve)
1855                         newpmd = pmd_swp_clear_uffd_wp(newpmd);
1856                 if (!pmd_same(*pmd, newpmd))
1857                         set_pmd_at(mm, addr, pmd, newpmd);
1858                 goto unlock;
1859         }
1860 #endif
1861
1862         if (prot_numa) {
1863                 struct page *page;
1864                 bool toptier;
1865                 /*
1866                  * Avoid trapping faults against the zero page. The read-only
1867                  * data is likely to be read-cached on the local CPU and
1868                  * local/remote hits to the zero page are not interesting.
1869                  */
1870                 if (is_huge_zero_pmd(*pmd))
1871                         goto unlock;
1872
1873                 if (pmd_protnone(*pmd))
1874                         goto unlock;
1875
1876                 page = pmd_page(*pmd);
1877                 toptier = node_is_toptier(page_to_nid(page));
1878                 /*
1879                  * Skip scanning the top-tier node if normal NUMA
1880                  * balancing is disabled.
1881                  */
1882                 if (!(sysctl_numa_balancing_mode & NUMA_BALANCING_NORMAL) &&
1883                     toptier)
1884                         goto unlock;
1885
1886                 if (sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING &&
1887                     !toptier)
1888                         xchg_page_access_time(page, jiffies_to_msecs(jiffies));
1889         }
1890         /*
1891          * In the prot_numa case we are under mmap_read_lock(mm). It's
1892          * critical not to clear the pmd intermittently, to avoid racing with
1893          * MADV_DONTNEED, which is also under mmap_read_lock(mm):
1894          *
1895          *      CPU0:                           CPU1:
1896          *                              change_huge_pmd(prot_numa=1)
1897          *                               pmdp_huge_get_and_clear_notify()
1898          * madvise_dontneed()
1899          *  zap_pmd_range()
1900          *   pmd_trans_huge(*pmd) == 0 (without ptl)
1901          *   // skip the pmd
1902          *                               set_pmd_at();
1903          *                               // pmd is re-established
1904          *
1905          * The race makes MADV_DONTNEED miss the huge pmd and fail to clear
1906          * it, which may break userspace.
1907          *
1908          * pmdp_invalidate_ad() is required to make sure we don't miss
1909          * dirty/young flags set by hardware.
1910          */
1911         oldpmd = pmdp_invalidate_ad(vma, addr, pmd);
1912
1913         entry = pmd_modify(oldpmd, newprot);
1914         if (uffd_wp)
1915                 entry = pmd_mkuffd_wp(entry);
1916         else if (uffd_wp_resolve)
1917                 /*
1918                  * Leave the write bit to be handled by the page
1919                  * fault handler, so that things like COW can be
1920                  * handled properly.
1921                  */
1922                 entry = pmd_clear_uffd_wp(entry);
1923
1924         /* See change_pte_range(). */
1925         if ((cp_flags & MM_CP_TRY_CHANGE_WRITABLE) && !pmd_write(entry) &&
1926             can_change_pmd_writable(vma, addr, entry))
1927                 entry = pmd_mkwrite(entry);
1928
1929         ret = HPAGE_PMD_NR;
1930         set_pmd_at(mm, addr, pmd, entry);
1931
1932         if (huge_pmd_needs_flush(oldpmd, entry))
1933                 tlb_flush_pmd_range(tlb, addr, HPAGE_PMD_SIZE);
1934 unlock:
1935         spin_unlock(ptl);
1936         return ret;
1937 }
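
/*
 * change_huge_pmd() runs under mprotect() as well as NUMA scanning. A
 * userspace sketch of the mprotect() case (assuming p is a PMD-mapped
 * anonymous THP):
 *
 *	mprotect(p, 2UL << 20, PROT_READ);		// write-protect the pmd
 *	mprotect(p, 2UL << 20, PROT_READ | PROT_WRITE);	// with
 *		// MM_CP_TRY_CHANGE_WRITABLE set, can_change_pmd_writable()
 *		// may map it writable right away, avoiding a write fault
 */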
1938
1939 /*
1940  * Returns page table lock pointer if a given pmd maps a thp, NULL otherwise.
1941  *
1942  * Note that if it returns the page table lock pointer, this routine returns
1943  * without unlocking the page table lock, so callers must unlock it.
1944  */
1945 spinlock_t *__pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma)
1946 {
1947         spinlock_t *ptl;
1948         ptl = pmd_lock(vma->vm_mm, pmd);
1949         if (likely(is_swap_pmd(*pmd) || pmd_trans_huge(*pmd) ||
1950                         pmd_devmap(*pmd)))
1951                 return ptl;
1952         spin_unlock(ptl);
1953         return NULL;
1954 }
1955
1956 /*
1957  * Returns page table lock pointer if a given pud maps a thp, NULL otherwise.
1958  *
1959  * Note that if it returns the page table lock pointer, this routine returns
1960  * without unlocking the page table lock, so callers must unlock it.
1961  */
1962 spinlock_t *__pud_trans_huge_lock(pud_t *pud, struct vm_area_struct *vma)
1963 {
1964         spinlock_t *ptl;
1965
1966         ptl = pud_lock(vma->vm_mm, pud);
1967         if (likely(pud_trans_huge(*pud) || pud_devmap(*pud)))
1968                 return ptl;
1969         spin_unlock(ptl);
1970         return NULL;
1971 }
1972
1973 #ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
1974 int zap_huge_pud(struct mmu_gather *tlb, struct vm_area_struct *vma,
1975                  pud_t *pud, unsigned long addr)
1976 {
1977         spinlock_t *ptl;
1978
1979         ptl = __pud_trans_huge_lock(pud, vma);
1980         if (!ptl)
1981                 return 0;
1982
1983         pudp_huge_get_and_clear_full(tlb->mm, addr, pud, tlb->fullmm);
1984         tlb_remove_pud_tlb_entry(tlb, pud, addr);
1985         if (vma_is_special_huge(vma)) {
1986                 spin_unlock(ptl);
1987                 /* No zero page support yet */
1988         } else {
1989                 /* No support for anonymous PUD pages yet */
1990                 BUG();
1991         }
1992         return 1;
1993 }
1994
1995 static void __split_huge_pud_locked(struct vm_area_struct *vma, pud_t *pud,
1996                 unsigned long haddr)
1997 {
1998         VM_BUG_ON(haddr & ~HPAGE_PUD_MASK);
1999         VM_BUG_ON_VMA(vma->vm_start > haddr, vma);
2000         VM_BUG_ON_VMA(vma->vm_end < haddr + HPAGE_PUD_SIZE, vma);
2001         VM_BUG_ON(!pud_trans_huge(*pud) && !pud_devmap(*pud));
2002
2003         count_vm_event(THP_SPLIT_PUD);
2004
2005         pudp_huge_clear_flush_notify(vma, haddr, pud);
2006 }
2007
2008 void __split_huge_pud(struct vm_area_struct *vma, pud_t *pud,
2009                 unsigned long address)
2010 {
2011         spinlock_t *ptl;
2012         struct mmu_notifier_range range;
2013
2014         mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma->vm_mm,
2015                                 address & HPAGE_PUD_MASK,
2016                                 (address & HPAGE_PUD_MASK) + HPAGE_PUD_SIZE);
2017         mmu_notifier_invalidate_range_start(&range);
2018         ptl = pud_lock(vma->vm_mm, pud);
2019         if (unlikely(!pud_trans_huge(*pud) && !pud_devmap(*pud)))
2020                 goto out;
2021         __split_huge_pud_locked(vma, pud, range.start);
2022
2023 out:
2024         spin_unlock(ptl);
2025         /*
2026          * No need to double call mmu_notifier->invalidate_range() callback as
2027          * the above pudp_huge_clear_flush_notify() did already call it.
2028          */
2029         mmu_notifier_invalidate_range_only_end(&range);
2030 }
2031 #endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
2032
2033 static void __split_huge_zero_page_pmd(struct vm_area_struct *vma,
2034                 unsigned long haddr, pmd_t *pmd)
2035 {
2036         struct mm_struct *mm = vma->vm_mm;
2037         pgtable_t pgtable;
2038         pmd_t _pmd, old_pmd;
2039         int i;
2040
2041         /*
2042          * Leave the pmd empty until the ptes are filled. Note that it is
2043          * fine to delay notification until mmu_notifier_invalidate_range_end()
2044          * as we are replacing a write-protected zero huge page with
2045          * write-protected zero ptes.
2046          *
2047          * See Documentation/mm/mmu_notifier.rst
2048          */
2049         old_pmd = pmdp_huge_clear_flush(vma, haddr, pmd);
2050
2051         pgtable = pgtable_trans_huge_withdraw(mm, pmd);
2052         pmd_populate(mm, &_pmd, pgtable);
2053
2054         for (i = 0; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) {
2055                 pte_t *pte, entry;
2056                 entry = pfn_pte(my_zero_pfn(haddr), vma->vm_page_prot);
2057                 entry = pte_mkspecial(entry);
2058                 if (pmd_uffd_wp(old_pmd))
2059                         entry = pte_mkuffd_wp(entry);
2060                 pte = pte_offset_map(&_pmd, haddr);
2061                 VM_BUG_ON(!pte_none(*pte));
2062                 set_pte_at(mm, haddr, pte, entry);
2063                 pte_unmap(pte);
2064         }
2065         smp_wmb(); /* make pte visible before pmd */
2066         pmd_populate(mm, pmd, pgtable);
2067 }
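
/*
 * One way to reach the zero-page split above from userspace (a sketch; the
 * huge zero page is only used when the use_zero_page flag is enabled):
 *
 *	char *p = mmap(NULL, 2UL << 20, PROT_READ,
 *		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	(void)p[0];			// read fault can map the huge zero page
 *	mprotect(p, 4096, PROT_NONE);	// partial-range mprotect() splits the
 *					// vma and hence the zero pmd via this path
 */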
2068
2069 static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
2070                 unsigned long haddr, bool freeze)
2071 {
2072         struct mm_struct *mm = vma->vm_mm;
2073         struct page *page;
2074         pgtable_t pgtable;
2075         pmd_t old_pmd, _pmd;
2076         bool young, write, soft_dirty, pmd_migration = false, uffd_wp = false;
2077         bool anon_exclusive = false, dirty = false;
2078         unsigned long addr;
2079         int i;
2080
2081         VM_BUG_ON(haddr & ~HPAGE_PMD_MASK);
2082         VM_BUG_ON_VMA(vma->vm_start > haddr, vma);
2083         VM_BUG_ON_VMA(vma->vm_end < haddr + HPAGE_PMD_SIZE, vma);
2084         VM_BUG_ON(!is_pmd_migration_entry(*pmd) && !pmd_trans_huge(*pmd)
2085                                 && !pmd_devmap(*pmd));
2086
2087         count_vm_event(THP_SPLIT_PMD);
2088
2089         if (!vma_is_anonymous(vma)) {
2090                 old_pmd = pmdp_huge_clear_flush_notify(vma, haddr, pmd);
2091                 /*
2092                  * We are going to unmap this huge page, so
2093                  * just go ahead and zap it.
2094                  */
2095                 if (arch_needs_pgtable_deposit())
2096                         zap_deposited_table(mm, pmd);
2097                 if (vma_is_special_huge(vma))
2098                         return;
2099                 if (unlikely(is_pmd_migration_entry(old_pmd))) {
2100                         swp_entry_t entry;
2101
2102                         entry = pmd_to_swp_entry(old_pmd);
2103                         page = pfn_swap_entry_to_page(entry);
2104                 } else {
2105                         page = pmd_page(old_pmd);
2106                         if (!PageDirty(page) && pmd_dirty(old_pmd))
2107                                 set_page_dirty(page);
2108                         if (!PageReferenced(page) && pmd_young(old_pmd))
2109                                 SetPageReferenced(page);
2110                         page_remove_rmap(page, vma, true);
2111                         put_page(page);
2112                 }
2113                 add_mm_counter(mm, mm_counter_file(page), -HPAGE_PMD_NR);
2114                 return;
2115         }
2116
2117         if (is_huge_zero_pmd(*pmd)) {
2118                 /*
2119                  * FIXME: Do we want to invalidate the secondary mmu by calling
2120                  * mmu_notifier_invalidate_range()? See the comments below inside
2121                  * __split_huge_pmd().
2122                  *
2123                  * We are going from a write-protected huge zero page to
2124                  * write-protected small zero pages, so it does not seem useful
2125                  * to invalidate the secondary mmu at this time.
2126                  */
2127                 return __split_huge_zero_page_pmd(vma, haddr, pmd);
2128         }
2129
2130         /*
2131          * Up to this point the pmd is present and huge and userland has full
2132          * access to the hugepage during the split (which happens in
2133          * place). If we overwrite the pmd with the not-huge version pointing
2134          * to the pte here (which of course we could if all CPUs were bug
2135          * free), userland could trigger a small page size TLB miss on the
2136          * small-sized TLB while the hugepage TLB entry is still established
2137          * in the huge TLB. Some CPUs don't like that.
2138          * See http://support.amd.com/TechDocs/41322_10h_Rev_Gd.pdf, Erratum
2139          * 383 on page 105. Intel should be safe but also warns that it's
2140          * only safe if the permission and cache attributes of the two entries
2141          * loaded in the two TLBs are identical (which should be the case here).
2142          * But it is generally safer to never allow small and huge TLB entries
2143          * for the same virtual address to be loaded simultaneously. So instead
2144          * of doing "pmd_populate(); flush_pmd_tlb_range();" we first mark the
2145          * current pmd notpresent (atomically because here the pmd_trans_huge
2146          * must remain set at all times on the pmd until the split is complete
2147          * for this pmd), then we flush the SMP TLB and finally we write the
2148          * non-huge version of the pmd entry with pmd_populate.
2149          */
2150         old_pmd = pmdp_invalidate(vma, haddr, pmd);
2151
2152         pmd_migration = is_pmd_migration_entry(old_pmd);
2153         if (unlikely(pmd_migration)) {
2154                 swp_entry_t entry;
2155
2156                 entry = pmd_to_swp_entry(old_pmd);
2157                 page = pfn_swap_entry_to_page(entry);
2158                 write = is_writable_migration_entry(entry);
2159                 if (PageAnon(page))
2160                         anon_exclusive = is_readable_exclusive_migration_entry(entry);
2161                 young = is_migration_entry_young(entry);
2162                 dirty = is_migration_entry_dirty(entry);
2163                 soft_dirty = pmd_swp_soft_dirty(old_pmd);
2164                 uffd_wp = pmd_swp_uffd_wp(old_pmd);
2165         } else {
2166                 page = pmd_page(old_pmd);
2167                 if (pmd_dirty(old_pmd)) {
2168                         dirty = true;
2169                         SetPageDirty(page);
2170                 }
2171                 write = pmd_write(old_pmd);
2172                 young = pmd_young(old_pmd);
2173                 soft_dirty = pmd_soft_dirty(old_pmd);
2174                 uffd_wp = pmd_uffd_wp(old_pmd);
2175
2176                 VM_BUG_ON_PAGE(!page_count(page), page);
2177
2178                 /*
2179                  * Without "freeze", we'll simply split the PMD, propagating the
2180                  * PageAnonExclusive() flag for each PTE by setting it for
2181                  * each subpage -- no need to (temporarily) clear.
2182                  *
2183                  * With "freeze" we want to replace mapped pages by
2184                  * migration entries right away. This is only possible if we
2185                  * managed to clear PageAnonExclusive() -- see
2186                  * set_pmd_migration_entry().
2187                  *
2188                  * In case we cannot clear PageAnonExclusive(), split the PMD
2189                  * only and let try_to_migrate_one() fail later.
2190                  *
2191                  * See page_try_share_anon_rmap(): invalidate PMD first.
2192                  */
2193                 anon_exclusive = PageAnon(page) && PageAnonExclusive(page);
2194                 if (freeze && anon_exclusive && page_try_share_anon_rmap(page))
2195                         freeze = false;
2196                 if (!freeze)
2197                         page_ref_add(page, HPAGE_PMD_NR - 1);
2198         }
2199
2200         /*
2201          * Withdraw the table only after we mark the pmd entry invalid.
2202          * This is critical for some architectures (Power).
2203          */
2204         pgtable = pgtable_trans_huge_withdraw(mm, pmd);
2205         pmd_populate(mm, &_pmd, pgtable);
2206
2207         for (i = 0, addr = haddr; i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE) {
2208                 pte_t entry, *pte;
2209                 /*
2210                  * Note that NUMA hinting access restrictions are not
2211                  * transferred to avoid any possibility of altering
2212                  * permissions across VMAs.
2213                  */
2214                 if (freeze || pmd_migration) {
2215                         swp_entry_t swp_entry;
2216                         if (write)
2217                                 swp_entry = make_writable_migration_entry(
2218                                                         page_to_pfn(page + i));
2219                         else if (anon_exclusive)
2220                                 swp_entry = make_readable_exclusive_migration_entry(
2221                                                         page_to_pfn(page + i));
2222                         else
2223                                 swp_entry = make_readable_migration_entry(
2224                                                         page_to_pfn(page + i));
2225                         if (young)
2226                                 swp_entry = make_migration_entry_young(swp_entry);
2227                         if (dirty)
2228                                 swp_entry = make_migration_entry_dirty(swp_entry);
2229                         entry = swp_entry_to_pte(swp_entry);
2230                         if (soft_dirty)
2231                                 entry = pte_swp_mksoft_dirty(entry);
2232                         if (uffd_wp)
2233                                 entry = pte_swp_mkuffd_wp(entry);
2234                 } else {
2235                         entry = mk_pte(page + i, READ_ONCE(vma->vm_page_prot));
2236                         if (write)
2237                                 entry = pte_mkwrite(entry);
2238                         if (anon_exclusive)
2239                                 SetPageAnonExclusive(page + i);
2240                         if (!young)
2241                                 entry = pte_mkold(entry);
2242                         /* NOTE: this may set soft-dirty too on some archs */
2243                         if (dirty)
2244                                 entry = pte_mkdirty(entry);
2245                         if (soft_dirty)
2246                                 entry = pte_mksoft_dirty(entry);
2247                         if (uffd_wp)
2248                                 entry = pte_mkuffd_wp(entry);
2249                         page_add_anon_rmap(page + i, vma, addr, false);
2250                 }
2251                 pte = pte_offset_map(&_pmd, addr);
2252                 BUG_ON(!pte_none(*pte));
2253                 set_pte_at(mm, addr, pte, entry);
2254                 pte_unmap(pte);
2255         }
2256
2257         if (!pmd_migration)
2258                 page_remove_rmap(page, vma, true);
2259         if (freeze)
2260                 put_page(page);
2261
2262         smp_wmb(); /* make pte visible before pmd */
2263         pmd_populate(mm, pmd, pgtable);
2264 }
2265
2266 void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
2267                 unsigned long address, bool freeze, struct folio *folio)
2268 {
2269         spinlock_t *ptl;
2270         struct mmu_notifier_range range;
2271
2272         mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma->vm_mm,
2273                                 address & HPAGE_PMD_MASK,
2274                                 (address & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE);
2275         mmu_notifier_invalidate_range_start(&range);
2276         ptl = pmd_lock(vma->vm_mm, pmd);
2277
2278         /*
2279          * If caller asks to setup a migration entry, we need a folio to check
2280          * pmd against. Otherwise we can end up replacing wrong folio.
2281          */
2282         VM_BUG_ON(freeze && !folio);
2283         VM_WARN_ON_ONCE(folio && !folio_test_locked(folio));
2284
2285         if (pmd_trans_huge(*pmd) || pmd_devmap(*pmd) ||
2286             is_pmd_migration_entry(*pmd)) {
2287                 /*
2288                  * It's safe to call pmd_page when folio is set because it's
2289                  * guaranteed that pmd is present.
2290                  */
2291                 if (folio && folio != page_folio(pmd_page(*pmd)))
2292                         goto out;
2293                 __split_huge_pmd_locked(vma, pmd, range.start, freeze);
2294         }
2295
2296 out:
2297         spin_unlock(ptl);
2298         /*
2299          * No need to double call the mmu_notifier->invalidate_range() callback.
2300          * There are 3 cases to consider inside __split_huge_pmd_locked():
2301          *  1) pmdp_huge_clear_flush_notify() calls invalidate_range() itself.
2302          *  2) __split_huge_zero_page_pmd() reads only the zero page, and any
2303          *     write fault will trigger a flush_notify before pointing to a new
2304          *     page (it is fine if the secondary mmu keeps pointing to the old
2305          *     zero page in the meantime).
2306          *  3) Splitting a huge pmd into ptes pointing to the same page: no
2307          *     need to invalidate the secondary tlb entries, they are all still
2308          *     valid, and any further change to an individual pte will notify,
2309          *     so there is no need to call mmu_notifier->invalidate_range().
2310          */
2311         mmu_notifier_invalidate_range_only_end(&range);
2312 }
2313
2314 void split_huge_pmd_address(struct vm_area_struct *vma, unsigned long address,
2315                 bool freeze, struct folio *folio)
2316 {
2317         pmd_t *pmd = mm_find_pmd(vma->vm_mm, address);
2318
2319         if (!pmd)
2320                 return;
2321
2322         __split_huge_pmd(vma, pmd, address, freeze, folio);
2323 }
2324
2325 static inline void split_huge_pmd_if_needed(struct vm_area_struct *vma, unsigned long address)
2326 {
2327         /*
2328          * If the new address isn't hpage aligned and it could previously
2329          * contain a hugepage: check if we need to split a huge pmd.
2330          */
2331         if (!IS_ALIGNED(address, HPAGE_PMD_SIZE) &&
2332             range_in_vma(vma, ALIGN_DOWN(address, HPAGE_PMD_SIZE),
2333                          ALIGN(address, HPAGE_PMD_SIZE)))
2334                 split_huge_pmd_address(vma, address, false, NULL);
2335 }
2336
2337 void vma_adjust_trans_huge(struct vm_area_struct *vma,
2338                              unsigned long start,
2339                              unsigned long end,
2340                              long adjust_next)
2341 {
2342         /* Check if we need to split start first. */
2343         split_huge_pmd_if_needed(vma, start);
2344
2345         /* Check if we need to split end next. */
2346         split_huge_pmd_if_needed(vma, end);
2347
2348         /*
2349          * If we're also updating the next vma vm_start,
2350          * check if we need to split it.
2351          */
2352         if (adjust_next > 0) {
2353                 struct vm_area_struct *next = find_vma(vma->vm_mm, vma->vm_end);
2354                 unsigned long nstart = next->vm_start;
2355                 nstart += adjust_next;
2356                 split_huge_pmd_if_needed(next, nstart);
2357         }
2358 }
2359
2360 static void unmap_folio(struct folio *folio)
2361 {
2362         enum ttu_flags ttu_flags = TTU_RMAP_LOCKED | TTU_SPLIT_HUGE_PMD |
2363                 TTU_SYNC;
2364
2365         VM_BUG_ON_FOLIO(!folio_test_large(folio), folio);
2366
2367         /*
2368          * Anon pages need migration entries to preserve them, but file
2369          * pages can simply be left unmapped, then faulted back on demand.
2370          * If that is ever changed (perhaps for mlock), update remap_page().
2371          */
2372         if (folio_test_anon(folio))
2373                 try_to_migrate(folio, ttu_flags);
2374         else
2375                 try_to_unmap(folio, ttu_flags | TTU_IGNORE_MLOCK);
2376 }
2377
2378 static void remap_page(struct folio *folio, unsigned long nr)
2379 {
2380         int i = 0;
2381
2382         /* If unmap_folio() uses try_to_migrate() on file, remove this check */
2383         if (!folio_test_anon(folio))
2384                 return;
2385         for (;;) {
2386                 remove_migration_ptes(folio, folio, true);
2387                 i += folio_nr_pages(folio);
2388                 if (i >= nr)
2389                         break;
2390                 folio = folio_next(folio);
2391         }
2392 }
2393
2394 static void lru_add_page_tail(struct page *head, struct page *tail,
2395                 struct lruvec *lruvec, struct list_head *list)
2396 {
2397         VM_BUG_ON_PAGE(!PageHead(head), head);
2398         VM_BUG_ON_PAGE(PageCompound(tail), head);
2399         VM_BUG_ON_PAGE(PageLRU(tail), head);
2400         lockdep_assert_held(&lruvec->lru_lock);
2401
2402         if (list) {
2403                 /* page reclaim is reclaiming a huge page */
2404                 VM_WARN_ON(PageLRU(head));
2405                 get_page(tail);
2406                 list_add_tail(&tail->lru, list);
2407         } else {
2408                 /* head is still on lru (and we have it frozen) */
2409                 VM_WARN_ON(!PageLRU(head));
2410                 if (PageUnevictable(tail))
2411                         tail->mlock_count = 0;
2412                 else
2413                         list_add_tail(&tail->lru, &head->lru);
2414                 SetPageLRU(tail);
2415         }
2416 }
2417
2418 static void __split_huge_page_tail(struct page *head, int tail,
2419                 struct lruvec *lruvec, struct list_head *list)
2420 {
2421         struct page *page_tail = head + tail;
2422
2423         VM_BUG_ON_PAGE(atomic_read(&page_tail->_mapcount) != -1, page_tail);
2424
2425         /*
2426          * Clone page flags before unfreezing refcount.
2427          *
2428          * A flags change might follow a successful get_page_unless_zero(),
2429          * for example lock_page() setting PG_waiters.
2430          *
2431          * Note that for mapped sub-pages of an anonymous THP,
2432          * PG_anon_exclusive has been cleared in unmap_folio() and is instead
2433          * stored in the migration entry, from where remap_page() will restore it.
2434          * We can still have PG_anon_exclusive set on effectively unmapped and
2435          * unreferenced sub-pages of an anonymous THP: we can simply drop
2436          * PG_anon_exclusive (-> PG_mappedtodisk) for these here.
2437          */
2438         page_tail->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
2439         page_tail->flags |= (head->flags &
2440                         ((1L << PG_referenced) |
2441                          (1L << PG_swapbacked) |
2442                          (1L << PG_swapcache) |
2443                          (1L << PG_mlocked) |
2444                          (1L << PG_uptodate) |
2445                          (1L << PG_active) |
2446                          (1L << PG_workingset) |
2447                          (1L << PG_locked) |
2448                          (1L << PG_unevictable) |
2449 #ifdef CONFIG_ARCH_USES_PG_ARCH_X
2450                          (1L << PG_arch_2) |
2451                          (1L << PG_arch_3) |
2452 #endif
2453                          (1L << PG_dirty) |
2454                          LRU_GEN_MASK | LRU_REFS_MASK));
2455
2456         /* ->mapping in first and second tail page is replaced by other uses */
2457         VM_BUG_ON_PAGE(tail > 2 && page_tail->mapping != TAIL_MAPPING,
2458                         page_tail);
2459         page_tail->mapping = head->mapping;
2460         page_tail->index = head->index + tail;
2461
2462         /*
2463          * page->private should not be set in tail pages with the exception
2464          * of swap cache pages that store the swp_entry_t in tail pages.
2465          * Fix up and warn once if private is unexpectedly set.
2466          *
2467          * What of 32-bit systems, on which folio->_pincount overlays
2468          * head[1].private?  No problem: THP_SWAP is not enabled on 32-bit, and
2469          * pincount must be 0 for folio_ref_freeze() to have succeeded.
2470          */
2471         if (!folio_test_swapcache(page_folio(head))) {
2472                 VM_WARN_ON_ONCE_PAGE(page_tail->private != 0, page_tail);
2473                 page_tail->private = 0;
2474         }
2475
2476         /* Page flags must be visible before we make the page non-compound. */
2477         smp_wmb();
2478
2479         /*
2480          * Clear PageTail before unfreezing page refcount.
2481          *
2482          * A put_page() might follow a successful get_page_unless_zero(),
2483          * and it needs a correct compound_head().
2484          */
2485         clear_compound_head(page_tail);
2486
2487         /* Finally unfreeze refcount. Additional reference from page cache. */
2488         page_ref_unfreeze(page_tail, 1 + (!PageAnon(head) ||
2489                                           PageSwapCache(head)));
2490
2491         if (page_is_young(head))
2492                 set_page_young(page_tail);
2493         if (page_is_idle(head))
2494                 set_page_idle(page_tail);
2495
2496         page_cpupid_xchg_last(page_tail, page_cpupid_last(head));
2497
2498         /*
2499          * Always add to the tail, because some iterators expect new
2500          * pages to show up after the currently processed elements, e.g.
2501          * migrate_pages().
2502          */
2503         lru_add_page_tail(head, page_tail, lruvec, list);
2504 }
2505
2506 static void __split_huge_page(struct page *page, struct list_head *list,
2507                 pgoff_t end)
2508 {
2509         struct folio *folio = page_folio(page);
2510         struct page *head = &folio->page;
2511         struct lruvec *lruvec;
2512         struct address_space *swap_cache = NULL;
2513         unsigned long offset = 0;
2514         unsigned int nr = thp_nr_pages(head);
2515         int i;
2516
2517         /* complete the memcg work before adding pages to the LRU */
2518         split_page_memcg(head, nr);
2519
2520         if (PageAnon(head) && PageSwapCache(head)) {
2521                 swp_entry_t entry = { .val = page_private(head) };
2522
2523                 offset = swp_offset(entry);
2524                 swap_cache = swap_address_space(entry);
2525                 xa_lock(&swap_cache->i_pages);
2526         }
2527
2528         /* lock lru list/PageCompound, ref frozen by page_ref_freeze */
2529         lruvec = folio_lruvec_lock(folio);
2530
2531         ClearPageHasHWPoisoned(head);
2532
2533         for (i = nr - 1; i >= 1; i--) {
2534                 __split_huge_page_tail(head, i, lruvec, list);
2535                 /* Some pages can be beyond EOF: drop them from page cache */
2536                 if (head[i].index >= end) {
2537                         struct folio *tail = page_folio(head + i);
2538
2539                         if (shmem_mapping(head->mapping))
2540                                 shmem_uncharge(head->mapping->host, 1);
2541                         else if (folio_test_clear_dirty(tail))
2542                                 folio_account_cleaned(tail,
2543                                         inode_to_wb(folio->mapping->host));
2544                         __filemap_remove_folio(tail, NULL);
2545                         folio_put(tail);
2546                 } else if (!PageAnon(page)) {
2547                         __xa_store(&head->mapping->i_pages, head[i].index,
2548                                         head + i, 0);
2549                 } else if (swap_cache) {
2550                         __xa_store(&swap_cache->i_pages, offset + i,
2551                                         head + i, 0);
2552                 }
2553         }
2554
2555         ClearPageCompound(head);
2556         unlock_page_lruvec(lruvec);
2557         /* Caller disabled irqs, so they are still disabled here */
2558
2559         split_page_owner(head, nr);
2560
2561         /* See comment in __split_huge_page_tail() */
2562         if (PageAnon(head)) {
2563                 /* Additional pin to swap cache */
2564                 if (PageSwapCache(head)) {
2565                         page_ref_add(head, 2);
2566                         xa_unlock(&swap_cache->i_pages);
2567                 } else {
2568                         page_ref_inc(head);
2569                 }
2570         } else {
2571                 /* Additional pin to page cache */
2572                 page_ref_add(head, 2);
2573                 xa_unlock(&head->mapping->i_pages);
2574         }
2575         local_irq_enable();
2576
2577         remap_page(folio, nr);
2578
2579         if (PageSwapCache(head)) {
2580                 swp_entry_t entry = { .val = page_private(head) };
2581
2582                 split_swap_cluster(entry);
2583         }
2584
2585         for (i = 0; i < nr; i++) {
2586                 struct page *subpage = head + i;
2587                 if (subpage == page)
2588                         continue;
2589                 unlock_page(subpage);
2590
2591                 /*
2592                  * Subpages may be freed if there wasn't any mapping,
2593                  * e.g. if add_to_swap() is running on an lru page that
2594                  * had its mapping zapped. Freeing these pages
2595                  * requires taking the lru_lock, so we do the put_page
2596                  * of the tail pages after the split is complete.
2597                  */
2598                 free_page_and_swap_cache(subpage);
2599         }
2600 }
2601
2602 /* Racy check whether the huge page can be split */
2603 bool can_split_folio(struct folio *folio, int *pextra_pins)
2604 {
2605         int extra_pins;
2606
2607         /* Additional pins from page cache */
2608         if (folio_test_anon(folio))
2609                 extra_pins = folio_test_swapcache(folio) ?
2610                                 folio_nr_pages(folio) : 0;
2611         else
2612                 extra_pins = folio_nr_pages(folio);
2613         if (pextra_pins)
2614                 *pextra_pins = extra_pins;
2615         return folio_mapcount(folio) == folio_ref_count(folio) - extra_pins - 1;
2616 }
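
/*
 * Worked example of the arithmetic above: a PMD-sized anonymous folio of
 * 512 pages that is also in the swap cache has extra_pins == 512, so with
 * the caller's own pin the split may only proceed when
 * folio_ref_count() == folio_mapcount() + 512 + 1.
 */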
2617
2618 /*
2619  * This function splits a huge page into normal pages. @page can point to any
2620  * subpage of the huge page to split; the split doesn't change the position of
2621  * @page.
2622  * The caller must hold the only pin on the @page, or the split fails with
2623  * -EBUSY. The huge page must be locked.
2624  *
2625  * If @list is null, tail pages will be added to LRU list, otherwise, to @list.
2626  *
2627  * Both head page and tail pages will inherit mapping, flags, and so on from
2628  * the hugepage.
2629  *
2630  * The GUP pin and PG_locked are transferred to @page. The rest of the
2631  * subpages can be freed if they are not mapped.
2632  *
2633  * Returns 0 if the hugepage is split successfully.
2634  * Returns -EBUSY if the page is pinned or if anon_vma disappeared from under
2635  * us.
2636  */
2637 int split_huge_page_to_list(struct page *page, struct list_head *list)
2638 {
2639         struct folio *folio = page_folio(page);
2640         struct deferred_split *ds_queue = get_deferred_split_queue(folio);
2641         XA_STATE(xas, &folio->mapping->i_pages, folio->index);
2642         struct anon_vma *anon_vma = NULL;
2643         struct address_space *mapping = NULL;
2644         int extra_pins, ret;
2645         pgoff_t end;
2646         bool is_hzp;
2647
2648         VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
2649         VM_BUG_ON_FOLIO(!folio_test_large(folio), folio);
2650
2651         is_hzp = is_huge_zero_page(&folio->page);
2652         if (is_hzp) {
2653                 pr_warn_ratelimited("Called split_huge_page for huge zero page\n");
2654                 return -EBUSY;
2655         }
2656
2657         if (folio_test_writeback(folio))
2658                 return -EBUSY;
2659
2660         if (folio_test_anon(folio)) {
2661                 /*
2662                  * The caller does not necessarily hold an mmap_lock that would
2663                  * prevent the anon_vma from disappearing, so we first take a
2664                  * reference to it and then lock the anon_vma for write. This
2665                  * is similar to folio_lock_anon_vma_read except the write lock
2666                  * is taken to serialise against parallel split or collapse
2667                  * operations.
2668                  */
2669                 anon_vma = folio_get_anon_vma(folio);
2670                 if (!anon_vma) {
2671                         ret = -EBUSY;
2672                         goto out;
2673                 }
2674                 end = -1;
2675                 mapping = NULL;
2676                 anon_vma_lock_write(anon_vma);
2677         } else {
2678                 gfp_t gfp;
2679
2680                 mapping = folio->mapping;
2681
2682                 /* Truncated ? */
2683                 if (!mapping) {
2684                         ret = -EBUSY;
2685                         goto out;
2686                 }
2687
2688                 gfp = current_gfp_context(mapping_gfp_mask(mapping) &
2689                                                         GFP_RECLAIM_MASK);
2690
2691                 if (folio_test_private(folio) &&
2692                                 !filemap_release_folio(folio, gfp)) {
2693                         ret = -EBUSY;
2694                         goto out;
2695                 }
2696
2697                 xas_split_alloc(&xas, folio, folio_order(folio), gfp);
2698                 if (xas_error(&xas)) {
2699                         ret = xas_error(&xas);
2700                         goto out;
2701                 }
2702
2703                 anon_vma = NULL;
2704                 i_mmap_lock_read(mapping);
2705
2706                 /*
2707                  * __split_huge_page() may need to trim off pages beyond EOF:
2708                  * but on 32-bit, i_size_read() takes an irq-unsafe seqlock,
2709                  * which cannot be nested inside the page tree lock. So note
2710                  * end now: i_size itself may be changed at any moment, but
2711                  * folio lock is good enough to serialize the trimming.
2712                  */
2713                 end = DIV_ROUND_UP(i_size_read(mapping->host), PAGE_SIZE);
2714                 if (shmem_mapping(mapping))
2715                         end = shmem_fallocend(mapping->host, end);
2716         }
2717
2718         /*
2719          * Racy check whether we can split the page, before unmap_folio()
2720          * splits the PMDs.
2721          */
2722         if (!can_split_folio(folio, &extra_pins)) {
2723                 ret = -EAGAIN;
2724                 goto out_unlock;
2725         }
2726
2727         unmap_folio(folio);
2728
2729         /* block interrupt reentry in xa_lock and spinlock */
2730         local_irq_disable();
2731         if (mapping) {
2732                 /*
2733                  * Check if the folio is present in the page cache.
2734                  * We assume all tail pages are present too, if the folio is there.
2735                  */
2736                 xas_lock(&xas);
2737                 xas_reset(&xas);
2738                 if (xas_load(&xas) != folio)
2739                         goto fail;
2740         }
2741
2742         /* Prevent deferred_split_scan() touching ->_refcount */
2743         spin_lock(&ds_queue->split_queue_lock);
2744         if (folio_ref_freeze(folio, 1 + extra_pins)) {
2745                 if (!list_empty(&folio->_deferred_list)) {
2746                         ds_queue->split_queue_len--;
2747                         list_del(&folio->_deferred_list);
2748                 }
2749                 spin_unlock(&ds_queue->split_queue_lock);
2750                 if (mapping) {
2751                         int nr = folio_nr_pages(folio);
2752
2753                         xas_split(&xas, folio, folio_order(folio));
2754                         if (folio_test_swapbacked(folio)) {
2755                                 __lruvec_stat_mod_folio(folio, NR_SHMEM_THPS,
2756                                                         -nr);
2757                         } else {
2758                                 __lruvec_stat_mod_folio(folio, NR_FILE_THPS,
2759                                                         -nr);
2760                                 filemap_nr_thps_dec(mapping);
2761                         }
2762                 }
2763
2764                 __split_huge_page(page, list, end);
2765                 ret = 0;
2766         } else {
2767                 spin_unlock(&ds_queue->split_queue_lock);
2768 fail:
2769                 if (mapping)
2770                         xas_unlock(&xas);
2771                 local_irq_enable();
2772                 remap_page(folio, folio_nr_pages(folio));
2773                 ret = -EAGAIN;
2774         }
2775
2776 out_unlock:
2777         if (anon_vma) {
2778                 anon_vma_unlock_write(anon_vma);
2779                 put_anon_vma(anon_vma);
2780         }
2781         if (mapping)
2782                 i_mmap_unlock_read(mapping);
2783 out:
2784         xas_destroy(&xas);
2785         count_vm_event(!ret ? THP_SPLIT_PAGE : THP_SPLIT_PAGE_FAILED);
2786         return ret;
2787 }
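
/*
 * For testing, splits can also be forced from userspace through the
 * split_huge_pages debugfs interface (a sketch, assuming CONFIG_DEBUG_FS
 * and that the interface is available):
 *
 *	int fd = open("/sys/kernel/debug/split_huge_pages", O_WRONLY);
 *	write(fd, "1", 1);	// split all THPs system-wide
 *	close(fd);
 */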
2788
2789 void free_transhuge_page(struct page *page)
2790 {
2791         struct folio *folio = (struct folio *)page;
2792         struct deferred_split *ds_queue = get_deferred_split_queue(folio);
2793         unsigned long flags;
2794
2795         spin_lock_irqsave(&ds_queue->split_queue_lock, flags);
2796         if (!list_empty(&folio->_deferred_list)) {
2797                 ds_queue->split_queue_len--;
2798                 list_del(&folio->_deferred_list);
2799         }
2800         spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags);
2801         free_compound_page(page);
2802 }
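/*
 * This destructor pairs with prep_transhuge_page() earlier in this
 * file, which (in rough, simplified form -- details differ) sets a
 * freshly allocated THP up for both paths:
 *
 *	INIT_LIST_HEAD(&folio->_deferred_list);
 *	set_compound_page_dtor(page, TRANSHUGE_PAGE_DTOR);
 *
 * so a folio freed while still queued for deferred splitting is
 * unlinked here before the compound page is released.
 */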
2803
2804 void deferred_split_folio(struct folio *folio)
2805 {
2806         struct deferred_split *ds_queue = get_deferred_split_queue(folio);
2807 #ifdef CONFIG_MEMCG
2808         struct mem_cgroup *memcg = folio_memcg(folio);
2809 #endif
2810         unsigned long flags;
2811
2812         VM_BUG_ON_FOLIO(folio_order(folio) < 2, folio);
2813
2814         /*
2815          * try_to_unmap() in the page reclaim path might reach here too;
2816          * racing with it could corrupt the deferred split queue.  And if
2817          * page reclaim is already handling the same folio, there is no
2818          * point in queueing it again for the shrinker.
2819          *
2820          * Check the swapcache flag to determine if the folio is being
2821          * handled by page reclaim since THP swap would add the folio into
2822          * swap cache before calling try_to_unmap().
2823          */
2824         if (folio_test_swapcache(folio))
2825                 return;
2826
2827         if (!list_empty(&folio->_deferred_list))
2828                 return;
2829
2830         spin_lock_irqsave(&ds_queue->split_queue_lock, flags);
2831         if (list_empty(&folio->_deferred_list)) {
2832                 count_vm_event(THP_DEFERRED_SPLIT_PAGE);
2833                 list_add_tail(&folio->_deferred_list, &ds_queue->split_queue);
2834                 ds_queue->split_queue_len++;
2835 #ifdef CONFIG_MEMCG
2836                 if (memcg)
2837                         set_shrinker_bit(memcg, folio_nid(folio),
2838                                          deferred_split_shrinker.id);
2839 #endif
2840         }
2841         spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags);
2842 }
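/*
 * Producer-side sketch (a hypothetical helper, simplified from the
 * rmap code): once a PMD-mappable THP becomes only partially mapped,
 * queue it so the shrinker can split it and free the unused subpages:
 */
static inline void queue_partially_mapped_thp(struct folio *folio)
{
	if (folio_test_pmd_mappable(folio) && folio_mapped(folio))
		deferred_split_folio(folio);
}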
2843
2844 static unsigned long deferred_split_count(struct shrinker *shrink,
2845                 struct shrink_control *sc)
2846 {
2847         struct pglist_data *pgdata = NODE_DATA(sc->nid);
2848         struct deferred_split *ds_queue = &pgdata->deferred_split_queue;
2849
2850 #ifdef CONFIG_MEMCG
2851         if (sc->memcg)
2852                 ds_queue = &sc->memcg->deferred_split_queue;
2853 #endif
2854         return READ_ONCE(ds_queue->split_queue_len);
2855 }
2856
2857 static unsigned long deferred_split_scan(struct shrinker *shrink,
2858                 struct shrink_control *sc)
2859 {
2860         struct pglist_data *pgdata = NODE_DATA(sc->nid);
2861         struct deferred_split *ds_queue = &pgdata->deferred_split_queue;
2862         unsigned long flags;
2863         LIST_HEAD(list);
2864         struct folio *folio, *next;
2865         int split = 0;
2866
2867 #ifdef CONFIG_MEMCG
2868         if (sc->memcg)
2869                 ds_queue = &sc->memcg->deferred_split_queue;
2870 #endif
2871
2872         spin_lock_irqsave(&ds_queue->split_queue_lock, flags);
2873         /* Take a pin on each folio to avoid it being freed under us */
2874         list_for_each_entry_safe(folio, next, &ds_queue->split_queue,
2875                                                         _deferred_list) {
2876                 if (folio_try_get(folio)) {
2877                         list_move(&folio->_deferred_list, &list);
2878                 } else {
2879                         /* We lost race with folio_put() */
2880                         list_del_init(&folio->_deferred_list);
2881                         ds_queue->split_queue_len--;
2882                 }
2883                 if (!--sc->nr_to_scan)
2884                         break;
2885         }
2886         spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags);
2887
2888         list_for_each_entry_safe(folio, next, &list, _deferred_list) {
2889                 if (!folio_trylock(folio))
2890                         goto next;
2891                 /* split_folio() removes the folio from the list on success */
2892                 if (!split_folio(folio))
2893                         split++;
2894                 folio_unlock(folio);
2895 next:
2896                 folio_put(folio);
2897         }
2898
2899         spin_lock_irqsave(&ds_queue->split_queue_lock, flags);
2900         list_splice_tail(&list, &ds_queue->split_queue);
2901         spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags);
2902
2903         /*
2904          * Stop the shrinker if we didn't split any folio and the queue is
2905          * empty.  This can happen if the folios were freed under us.
2906          */
2907         if (!split && list_empty(&ds_queue->split_queue))
2908                 return SHRINK_STOP;
2909         return split;
2910 }
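/*
 * The locking shape in deferred_split_scan() is a reusable pattern:
 * move a bounded batch onto a private list under the lock, drop the
 * lock for the expensive work, then splice the survivors back.  A
 * generic sketch with hypothetical types (not a kernel API):
 */
struct drain_queue {
	spinlock_t lock;
	struct list_head items;
};

static void drain_batch(struct drain_queue *q, int budget,
			bool (*process)(struct list_head *item))
{
	struct list_head *pos, *next;
	unsigned long flags;
	LIST_HEAD(batch);

	spin_lock_irqsave(&q->lock, flags);
	list_for_each_safe(pos, next, &q->items) {
		list_move(pos, &batch);
		if (!--budget)
			break;
	}
	spin_unlock_irqrestore(&q->lock, flags);

	/* The expensive work runs without the lock held. */
	list_for_each_safe(pos, next, &batch)
		if (process(pos))
			list_del_init(pos);	/* handled; drop it */

	spin_lock_irqsave(&q->lock, flags);
	list_splice_tail(&batch, &q->items);	/* requeue the rest */
	spin_unlock_irqrestore(&q->lock, flags);
}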
2911
2912 static struct shrinker deferred_split_shrinker = {
2913         .count_objects = deferred_split_count,
2914         .scan_objects = deferred_split_scan,
2915         .seeks = DEFAULT_SEEKS,
2916         .flags = SHRINKER_NUMA_AWARE | SHRINKER_MEMCG_AWARE |
2917                  SHRINKER_NONSLAB,
2918 };
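/*
 * Registration happens in hugepage_init() earlier in this file; modulo
 * error handling it is roughly:
 *
 *	register_shrinker(&deferred_split_shrinker, "thp-deferred_split");
 *
 * SHRINKER_MEMCG_AWARE makes deferred_split_count()/_scan() run per
 * memcg (sc->memcg set), and SHRINKER_NUMA_AWARE per NUMA node.
 */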
2919
2920 #ifdef CONFIG_DEBUG_FS
2921 static void split_huge_pages_all(void)
2922 {
2923         struct zone *zone;
2924         struct page *page;
2925         struct folio *folio;
2926         unsigned long pfn, max_zone_pfn;
2927         unsigned long total = 0, split = 0;
2928
2929         pr_debug("Split all THPs\n");
2930         for_each_zone(zone) {
2931                 if (!managed_zone(zone))
2932                         continue;
2933                 max_zone_pfn = zone_end_pfn(zone);
2934                 for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++) {
2935                         int nr_pages;
2936
2937                         page = pfn_to_online_page(pfn);
2938                         if (!page || PageTail(page))
2939                                 continue;
2940                         folio = page_folio(page);
2941                         if (!folio_try_get(folio))
2942                                 continue;
2943
2944                         if (unlikely(page_folio(page) != folio))
2945                                 goto next;
2946
2947                         if (zone != folio_zone(folio))
2948                                 goto next;
2949
2950                         if (!folio_test_large(folio) ||
2951                             folio_test_hugetlb(folio) ||
2952                             !folio_test_lru(folio))
2953                                 goto next;
2954
2955                         total++;
2956                         folio_lock(folio);
2957                         nr_pages = folio_nr_pages(folio);
2958                         if (!split_folio(folio))
2959                                 split++;
2960                         pfn += nr_pages - 1;
2961                         folio_unlock(folio);
2962 next:
2963                         folio_put(folio);
2964                         cond_resched();
2965                 }
2966         }
2967
2968         pr_debug("%lu of %lu THP split\n", split, total);
2969 }
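/*
 * The get-then-recheck idiom in the loop above, in isolation as a
 * hypothetical helper: a speculative reference can land on a folio
 * that is concurrently split, freed or reused, so the page-to-folio
 * binding must be revalidated after folio_try_get():
 */
static struct folio *pfn_folio_get_stable(unsigned long pfn)
{
	struct page *page = pfn_to_online_page(pfn);
	struct folio *folio;

	if (!page)
		return NULL;
	folio = page_folio(page);
	if (!folio_try_get(folio))
		return NULL;
	if (unlikely(page_folio(page) != folio)) {
		/* Raced with a split or free; drop the stale reference. */
		folio_put(folio);
		return NULL;
	}
	return folio;
}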
2970
2971 static inline bool vma_not_suitable_for_thp_split(struct vm_area_struct *vma)
2972 {
2973         return vma_is_special_huge(vma) || (vma->vm_flags & VM_IO) ||
2974                     is_vm_hugetlb_page(vma);
2975 }
2976
2977 static int split_huge_pages_pid(int pid, unsigned long vaddr_start,
2978                                 unsigned long vaddr_end)
2979 {
2980         int ret = 0;
2981         struct task_struct *task;
2982         struct mm_struct *mm;
2983         unsigned long total = 0, split = 0;
2984         unsigned long addr;
2985
2986         vaddr_start &= PAGE_MASK;
2987         vaddr_end &= PAGE_MASK;
2988
2989         /* Find the task_struct from pid */
2990         rcu_read_lock();
2991         task = find_task_by_vpid(pid);
2992         if (!task) {
2993                 rcu_read_unlock();
2994                 ret = -ESRCH;
2995                 goto out;
2996         }
2997         get_task_struct(task);
2998         rcu_read_unlock();
2999
3000         /* Find the mm_struct */
3001         mm = get_task_mm(task);
3002         put_task_struct(task);
3003
3004         if (!mm) {
3005                 ret = -EINVAL;
3006                 goto out;
3007         }
3008
3009         pr_debug("Split huge pages in pid: %d, vaddr: [0x%lx - 0x%lx]\n",
3010                  pid, vaddr_start, vaddr_end);
3011
3012         mmap_read_lock(mm);
3013         /*
3014          * Always advance addr by PAGE_SIZE, since we could have a PTE page
3015          * table filled with PTE-mapped THPs, each of which is distinct.
3016          */
3017         for (addr = vaddr_start; addr < vaddr_end; addr += PAGE_SIZE) {
3018                 struct vm_area_struct *vma = vma_lookup(mm, addr);
3019                 struct page *page;
3020
3021                 if (!vma)
3022                         break;
3023
3024                 /* skip special VMA and hugetlb VMA */
3025                 if (vma_not_suitable_for_thp_split(vma)) {
3026                         addr = vma->vm_end;
3027                         continue;
3028                 }
3029
3030                 /* FOLL_DUMP to ignore special (like zero) pages */
3031                 page = follow_page(vma, addr, FOLL_GET | FOLL_DUMP);
3032
3033                 if (IS_ERR_OR_NULL(page))
3034                         continue;
3035
3036                 if (!is_transparent_hugepage(page))
3037                         goto next;
3038
3039                 total++;
3040                 if (!can_split_folio(page_folio(page), NULL))
3041                         goto next;
3042
3043                 if (!trylock_page(page))
3044                         goto next;
3045
3046                 if (!split_huge_page(page))
3047                         split++;
3048
3049                 unlock_page(page);
3050 next:
3051                 put_page(page);
3052                 cond_resched();
3053         }
3054         mmap_read_unlock(mm);
3055         mmput(mm);
3056
3057         pr_debug("%lu of %lu THP split\n", split, total);
3058
3059 out:
3060         return ret;
3061 }
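/*
 * Userspace usage (CONFIG_DEBUG_FS; see
 * Documentation/admin-guide/mm/transhuge.rst), e.g. to split the THPs
 * mapped in an address range of pid 1234 (addresses are examples):
 *
 *	echo "1234,0x7f3800000000,0x7f3800400000" > \
 *		/sys/kernel/debug/split_huge_pages
 */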
3062
3063 static int split_huge_pages_in_file(const char *file_path, pgoff_t off_start,
3064                                 pgoff_t off_end)
3065 {
3066         struct filename *file;
3067         struct file *candidate;
3068         struct address_space *mapping;
3069         int ret = -EINVAL;
3070         pgoff_t index;
3071         int nr_pages = 1;
3072         unsigned long total = 0, split = 0;
3073
3074         file = getname_kernel(file_path);
3075         if (IS_ERR(file))
3076                 return ret;
3077
3078         candidate = file_open_name(file, O_RDONLY, 0);
3079         if (IS_ERR(candidate))
3080                 goto out;
3081
3082         pr_debug("split file-backed THPs in file: %s, page offset: [0x%lx - 0x%lx]\n",
3083                  file_path, off_start, off_end);
3084
3085         mapping = candidate->f_mapping;
3086
3087         for (index = off_start; index < off_end; index += nr_pages) {
3088                 struct folio *folio = filemap_get_folio(mapping, index);
3089
3090                 nr_pages = 1;
3091                 if (IS_ERR(folio))
3092                         continue;
3093
3094                 if (!folio_test_large(folio))
3095                         goto next;
3096
3097                 total++;
3098                 nr_pages = folio_nr_pages(folio);
3099
3100                 if (!folio_trylock(folio))
3101                         goto next;
3102
3103                 if (!split_folio(folio))
3104                         split++;
3105
3106                 folio_unlock(folio);
3107 next:
3108                 folio_put(folio);
3109                 cond_resched();
3110         }
3111
3112         filp_close(candidate, NULL);
3113         ret = 0;
3114
3115         pr_debug("%lu of %lu file-backed THP split\n", split, total);
3116 out:
3117         putname(file);
3118         return ret;
3119 }
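/*
 * Userspace usage, with the range given as hex page offsets into the
 * file (path and offsets are examples):
 *
 *	echo "/path/to/file,0x200,0x400" > /sys/kernel/debug/split_huge_pages
 */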
3120
3121 #define MAX_INPUT_BUF_SZ 255
3122
3123 static ssize_t split_huge_pages_write(struct file *file, const char __user *buf,
3124                                 size_t count, loff_t *ppos)
3125 {
3126         static DEFINE_MUTEX(split_debug_mutex);
3127         ssize_t ret;
3128         /* hold pid, start_vaddr, end_vaddr or file_path, off_start, off_end */
3129         char input_buf[MAX_INPUT_BUF_SZ];
3130         int pid;
3131         unsigned long vaddr_start, vaddr_end;
3132
3133         ret = mutex_lock_interruptible(&split_debug_mutex);
3134         if (ret)
3135                 return ret;
3136
3137         ret = -EFAULT;
3138
3139         memset(input_buf, 0, MAX_INPUT_BUF_SZ);
3140         if (copy_from_user(input_buf, buf, min_t(size_t, count, MAX_INPUT_BUF_SZ)))
3141                 goto out;
3142
3143         input_buf[MAX_INPUT_BUF_SZ - 1] = '\0';
3144
3145         if (input_buf[0] == '/') {
3146                 char *tok;
3147                 char *buf = input_buf;
3148                 char file_path[MAX_INPUT_BUF_SZ];
3149                 pgoff_t off_start = 0, off_end = 0;
3150                 size_t input_len = strlen(input_buf);
3151
3152                 tok = strsep(&buf, ",");
3153                 if (tok) {
3154                         strcpy(file_path, tok);
3155                 } else {
3156                         ret = -EINVAL;
3157                         goto out;
3158                 }
3159
3160                 ret = sscanf(buf, "0x%lx,0x%lx", &off_start, &off_end);
3161                 if (ret != 2) {
3162                         ret = -EINVAL;
3163                         goto out;
3164                 }
3165                 ret = split_huge_pages_in_file(file_path, off_start, off_end);
3166                 if (!ret)
3167                         ret = input_len;
3168
3169                 goto out;
3170         }
3171
3172         ret = sscanf(input_buf, "%d,0x%lx,0x%lx", &pid, &vaddr_start, &vaddr_end);
3173         if (ret == 1 && pid == 1) {
3174                 split_huge_pages_all();
3175                 ret = strlen(input_buf);
3176                 goto out;
3177         } else if (ret != 3) {
3178                 ret = -EINVAL;
3179                 goto out;
3180         }
3181
3182         ret = split_huge_pages_pid(pid, vaddr_start, vaddr_end);
3183         if (!ret)
3184                 ret = strlen(input_buf);
3185 out:
3186         mutex_unlock(&split_debug_mutex);
3187         return ret;
3188
3189 }
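/*
 * The remaining accepted input is the single token "1" (matched by
 * ret == 1 && pid == 1 above), which splits every THP system-wide:
 *
 *	echo 1 > /sys/kernel/debug/split_huge_pages
 */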
3190
3191 static const struct file_operations split_huge_pages_fops = {
3192         .owner   = THIS_MODULE,
3193         .write   = split_huge_pages_write,
3194         .llseek  = no_llseek,
3195 };
3196
3197 static int __init split_huge_pages_debugfs(void)
3198 {
3199         debugfs_create_file("split_huge_pages", 0200, NULL, NULL,
3200                             &split_huge_pages_fops);
3201         return 0;
3202 }
3203 late_initcall(split_huge_pages_debugfs);
3204 #endif
3205
3206 #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
3207 int set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,
3208                 struct page *page)
3209 {
3210         struct vm_area_struct *vma = pvmw->vma;
3211         struct mm_struct *mm = vma->vm_mm;
3212         unsigned long address = pvmw->address;
3213         bool anon_exclusive;
3214         pmd_t pmdval;
3215         swp_entry_t entry;
3216         pmd_t pmdswp;
3217
3218         if (!(pvmw->pmd && !pvmw->pte))
3219                 return 0;
3220
3221         flush_cache_range(vma, address, address + HPAGE_PMD_SIZE);
3222         pmdval = pmdp_invalidate(vma, address, pvmw->pmd);
3223
3224         /* See page_try_share_anon_rmap(): invalidate PMD first. */
3225         anon_exclusive = PageAnon(page) && PageAnonExclusive(page);
3226         if (anon_exclusive && page_try_share_anon_rmap(page)) {
3227                 set_pmd_at(mm, address, pvmw->pmd, pmdval);
3228                 return -EBUSY;
3229         }
3230
3231         if (pmd_dirty(pmdval))
3232                 set_page_dirty(page);
3233         if (pmd_write(pmdval))
3234                 entry = make_writable_migration_entry(page_to_pfn(page));
3235         else if (anon_exclusive)
3236                 entry = make_readable_exclusive_migration_entry(page_to_pfn(page));
3237         else
3238                 entry = make_readable_migration_entry(page_to_pfn(page));
3239         if (pmd_young(pmdval))
3240                 entry = make_migration_entry_young(entry);
3241         if (pmd_dirty(pmdval))
3242                 entry = make_migration_entry_dirty(entry);
3243         pmdswp = swp_entry_to_pmd(entry);
3244         if (pmd_soft_dirty(pmdval))
3245                 pmdswp = pmd_swp_mksoft_dirty(pmdswp);
3246         if (pmd_uffd_wp(pmdval))
3247                 pmdswp = pmd_swp_mkuffd_wp(pmdswp);
3248         set_pmd_at(mm, address, pvmw->pmd, pmdswp);
3249         page_remove_rmap(page, vma, true);
3250         put_page(page);
3251         trace_set_migration_pmd(address, pmd_val(pmdswp));
3252
3253         return 0;
3254 }
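/*
 * Decoding side, as a hypothetical helper: the non-present PMD
 * installed above is recognised elsewhere (fault and rmap walkers)
 * roughly like this:
 */
static bool pmd_migration_is_writable(pmd_t pmd)
{
	if (!is_pmd_migration_entry(pmd))
		return false;
	return is_writable_migration_entry(pmd_to_swp_entry(pmd));
}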
3255
3256 void remove_migration_pmd(struct page_vma_mapped_walk *pvmw, struct page *new)
3257 {
3258         struct vm_area_struct *vma = pvmw->vma;
3259         struct mm_struct *mm = vma->vm_mm;
3260         unsigned long address = pvmw->address;
3261         unsigned long haddr = address & HPAGE_PMD_MASK;
3262         pmd_t pmde;
3263         swp_entry_t entry;
3264
3265         if (!(pvmw->pmd && !pvmw->pte))
3266                 return;
3267
3268         entry = pmd_to_swp_entry(*pvmw->pmd);
3269         get_page(new);
3270         pmde = mk_huge_pmd(new, READ_ONCE(vma->vm_page_prot));
3271         if (pmd_swp_soft_dirty(*pvmw->pmd))
3272                 pmde = pmd_mksoft_dirty(pmde);
3273         if (is_writable_migration_entry(entry))
3274                 pmde = pmd_mkwrite(pmde);
3275         if (pmd_swp_uffd_wp(*pvmw->pmd))
3276                 pmde = pmd_mkuffd_wp(pmde);
3277         if (!is_migration_entry_young(entry))
3278                 pmde = pmd_mkold(pmde);
3279         /* NOTE: pmd_mkdirty() may also set soft-dirty on some archs */
3280         if (PageDirty(new) && is_migration_entry_dirty(entry))
3281                 pmde = pmd_mkdirty(pmde);
3282
3283         if (PageAnon(new)) {
3284                 rmap_t rmap_flags = RMAP_COMPOUND;
3285
3286                 if (!is_readable_migration_entry(entry))
3287                         rmap_flags |= RMAP_EXCLUSIVE;
3288
3289                 page_add_anon_rmap(new, vma, haddr, rmap_flags);
3290         } else {
3291                 page_add_file_rmap(new, vma, true);
3292         }
3293         VM_BUG_ON(pmd_write(pmde) && PageAnon(new) && !PageAnonExclusive(new));
3294         set_pmd_at(mm, haddr, pvmw->pmd, pmde);
3295
3296         /* No need to invalidate - it was non-present before */
3297         update_mmu_cache_pmd(vma, address, pvmw->pmd);
3298         trace_remove_migration_pmd(address, pmd_val(pmde));
3299 }
3300 #endif