1 // SPDX-License-Identifier: GPL-2.0
2 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
5 #include <linux/sched.h>
6 #include <linux/sched/mm.h>
7 #include <linux/sched/coredump.h>
8 #include <linux/mmu_notifier.h>
9 #include <linux/rmap.h>
10 #include <linux/swap.h>
11 #include <linux/mm_inline.h>
12 #include <linux/kthread.h>
13 #include <linux/khugepaged.h>
14 #include <linux/freezer.h>
15 #include <linux/mman.h>
16 #include <linux/hashtable.h>
17 #include <linux/userfaultfd_k.h>
18 #include <linux/page_idle.h>
19 #include <linux/swapops.h>
20 #include <linux/shmem_fs.h>
23 #include <asm/pgalloc.h>
34 SCAN_LACK_REFERENCED_PAGE,
48 SCAN_ALLOC_HUGE_PAGE_FAIL,
49 SCAN_CGROUP_CHARGE_FAIL,
52 SCAN_PAGE_HAS_PRIVATE,
55 #define CREATE_TRACE_POINTS
56 #include <trace/events/huge_memory.h>
58 /* default: scan 8*512 ptes (or vmas) every 30 seconds */
59 static unsigned int khugepaged_pages_to_scan __read_mostly;
60 static unsigned int khugepaged_pages_collapsed;
61 static unsigned int khugepaged_full_scans;
62 static unsigned int khugepaged_scan_sleep_millisecs __read_mostly = 10000;
63 /* during fragmentation poll the hugepage allocator once every minute */
64 static unsigned int khugepaged_alloc_sleep_millisecs __read_mostly = 60000;
65 static unsigned long khugepaged_sleep_expire;
66 static DEFINE_SPINLOCK(khugepaged_mm_lock);
67 static DECLARE_WAIT_QUEUE_HEAD(khugepaged_wait);
69 * By default, collapse hugepages if there is at least one pte mapped as
70 * it would have been had the vma been large enough at page fault time.
73 static unsigned int khugepaged_max_ptes_none __read_mostly;
74 static unsigned int khugepaged_max_ptes_swap __read_mostly;
76 #define MM_SLOTS_HASH_BITS 10
77 static __read_mostly DEFINE_HASHTABLE(mm_slots_hash, MM_SLOTS_HASH_BITS);
79 static struct kmem_cache *mm_slot_cache __read_mostly;
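/*
 * MAX_PTE_MAPPED_THP bounds the pte_mapped_thp[] array in struct mm_slot
 * below: at most this many pte-mapped THP addresses are remembered per mm
 * between scans.
 */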
81 #define MAX_PTE_MAPPED_THP 8
84 * struct mm_slot - hash lookup from mm to mm_slot
85 * @hash: hash collision list
86 * @mm_node: khugepaged scan list headed in khugepaged_scan.mm_head
87 * @mm: the mm that this information is valid for
90 struct hlist_node hash;
91 struct list_head mm_node;
94 /* pte-mapped THP in this mm */
95 int nr_pte_mapped_thp;
96 unsigned long pte_mapped_thp[MAX_PTE_MAPPED_THP];
100 * struct khugepaged_scan - cursor for scanning
101 * @mm_head: the head of the mm list to scan
102 * @mm_slot: the current mm_slot we are scanning
103 * @address: the next address inside that to be scanned
105 * There is only one khugepaged_scan instance of this cursor structure.
107 struct khugepaged_scan {
108 struct list_head mm_head;
109 struct mm_slot *mm_slot;
110 unsigned long address;
113 static struct khugepaged_scan khugepaged_scan = {
114 .mm_head = LIST_HEAD_INIT(khugepaged_scan.mm_head),
118 static ssize_t scan_sleep_millisecs_show(struct kobject *kobj,
119 struct kobj_attribute *attr,
122 return sprintf(buf, "%u\n", khugepaged_scan_sleep_millisecs);
125 static ssize_t scan_sleep_millisecs_store(struct kobject *kobj,
126 struct kobj_attribute *attr,
127 const char *buf, size_t count)
132 err = kstrtoul(buf, 10, &msecs);
133 if (err || msecs > UINT_MAX)
136 khugepaged_scan_sleep_millisecs = msecs;
137 khugepaged_sleep_expire = 0;
138 wake_up_interruptible(&khugepaged_wait);
142 static struct kobj_attribute scan_sleep_millisecs_attr =
143 __ATTR(scan_sleep_millisecs, 0644, scan_sleep_millisecs_show,
144 scan_sleep_millisecs_store);
146 static ssize_t alloc_sleep_millisecs_show(struct kobject *kobj,
147 struct kobj_attribute *attr,
150 return sprintf(buf, "%u\n", khugepaged_alloc_sleep_millisecs);
153 static ssize_t alloc_sleep_millisecs_store(struct kobject *kobj,
154 struct kobj_attribute *attr,
155 const char *buf, size_t count)
160 err = kstrtoul(buf, 10, &msecs);
161 if (err || msecs > UINT_MAX)
164 khugepaged_alloc_sleep_millisecs = msecs;
165 khugepaged_sleep_expire = 0;
166 wake_up_interruptible(&khugepaged_wait);
170 static struct kobj_attribute alloc_sleep_millisecs_attr =
171 __ATTR(alloc_sleep_millisecs, 0644, alloc_sleep_millisecs_show,
172 alloc_sleep_millisecs_store);
174 static ssize_t pages_to_scan_show(struct kobject *kobj,
175 struct kobj_attribute *attr,
178 return sprintf(buf, "%u\n", khugepaged_pages_to_scan);
180 static ssize_t pages_to_scan_store(struct kobject *kobj,
181 struct kobj_attribute *attr,
182 const char *buf, size_t count)
187 err = kstrtoul(buf, 10, &pages);
188 if (err || !pages || pages > UINT_MAX)
191 khugepaged_pages_to_scan = pages;
195 static struct kobj_attribute pages_to_scan_attr =
196 __ATTR(pages_to_scan, 0644, pages_to_scan_show,
197 pages_to_scan_store);
199 static ssize_t pages_collapsed_show(struct kobject *kobj,
200 struct kobj_attribute *attr,
203 return sprintf(buf, "%u\n", khugepaged_pages_collapsed);
205 static struct kobj_attribute pages_collapsed_attr =
206 __ATTR_RO(pages_collapsed);
208 static ssize_t full_scans_show(struct kobject *kobj,
209 struct kobj_attribute *attr,
212 return sprintf(buf, "%u\n", khugepaged_full_scans);
214 static struct kobj_attribute full_scans_attr =
215 __ATTR_RO(full_scans);
217 static ssize_t khugepaged_defrag_show(struct kobject *kobj,
218 struct kobj_attribute *attr, char *buf)
220 return single_hugepage_flag_show(kobj, attr, buf,
221 TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
223 static ssize_t khugepaged_defrag_store(struct kobject *kobj,
224 struct kobj_attribute *attr,
225 const char *buf, size_t count)
227 return single_hugepage_flag_store(kobj, attr, buf, count,
228 TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
230 static struct kobj_attribute khugepaged_defrag_attr =
231 __ATTR(defrag, 0644, khugepaged_defrag_show,
232 khugepaged_defrag_store);
235 * max_ptes_none controls whether khugepaged should collapse hugepages
236 * over unmapped ptes, in turn potentially increasing the memory
237 * footprint of the vmas. When max_ptes_none is 0, khugepaged will not
238 * reduce the available free memory in the system as it runs.
239 * Increasing max_ptes_none will instead potentially reduce the free
240 * memory in the system during the khugepaged scan.
242 static ssize_t khugepaged_max_ptes_none_show(struct kobject *kobj,
243 struct kobj_attribute *attr,
246 return sprintf(buf, "%u\n", khugepaged_max_ptes_none);
248 static ssize_t khugepaged_max_ptes_none_store(struct kobject *kobj,
249 struct kobj_attribute *attr,
250 const char *buf, size_t count)
253 unsigned long max_ptes_none;
255 err = kstrtoul(buf, 10, &max_ptes_none);
256 if (err || max_ptes_none > HPAGE_PMD_NR-1)
259 khugepaged_max_ptes_none = max_ptes_none;
263 static struct kobj_attribute khugepaged_max_ptes_none_attr =
264 __ATTR(max_ptes_none, 0644, khugepaged_max_ptes_none_show,
265 khugepaged_max_ptes_none_store);
267 static ssize_t khugepaged_max_ptes_swap_show(struct kobject *kobj,
268 struct kobj_attribute *attr,
271 return sprintf(buf, "%u\n", khugepaged_max_ptes_swap);
274 static ssize_t khugepaged_max_ptes_swap_store(struct kobject *kobj,
275 struct kobj_attribute *attr,
276 const char *buf, size_t count)
279 unsigned long max_ptes_swap;
281 err = kstrtoul(buf, 10, &max_ptes_swap);
282 if (err || max_ptes_swap > HPAGE_PMD_NR-1)
285 khugepaged_max_ptes_swap = max_ptes_swap;
290 static struct kobj_attribute khugepaged_max_ptes_swap_attr =
291 __ATTR(max_ptes_swap, 0644, khugepaged_max_ptes_swap_show,
292 khugepaged_max_ptes_swap_store);
294 static struct attribute *khugepaged_attr[] = {
295 &khugepaged_defrag_attr.attr,
296 &khugepaged_max_ptes_none_attr.attr,
297 &pages_to_scan_attr.attr,
298 &pages_collapsed_attr.attr,
299 &full_scans_attr.attr,
300 &scan_sleep_millisecs_attr.attr,
301 &alloc_sleep_millisecs_attr.attr,
302 &khugepaged_max_ptes_swap_attr.attr,
306 struct attribute_group khugepaged_attr_group = {
307 .attrs = khugepaged_attr,
308 .name = "khugepaged",
310 #endif /* CONFIG_SYSFS */
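/*
 * The khugepaged attribute group above is registered by the core THP code
 * (mm/huge_memory.c), so with sysfs mounted these knobs appear under
 * /sys/kernel/mm/transparent_hugepage/khugepaged/, e.g.:
 *
 *   echo 60000 > /sys/kernel/mm/transparent_hugepage/khugepaged/scan_sleep_millisecs
 *   cat /sys/kernel/mm/transparent_hugepage/khugepaged/pages_collapsed
 */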
312 int hugepage_madvise(struct vm_area_struct *vma,
313 unsigned long *vm_flags, int advice)
319 * qemu blindly sets MADV_HUGEPAGE on all allocations, but s390
320 * can't handle this properly after s390_enable_sie, so we simply
321 * ignore the madvise to prevent qemu from causing a SIGSEGV.
323 if (mm_has_pgste(vma->vm_mm))
326 *vm_flags &= ~VM_NOHUGEPAGE;
327 *vm_flags |= VM_HUGEPAGE;
329 * If the vma becomes good for khugepaged to scan,
330 * register it here without waiting for a page fault that
331 * may not happen any time soon.
333 if (!(*vm_flags & VM_NO_KHUGEPAGED) &&
334 khugepaged_enter_vma_merge(vma, *vm_flags))
337 case MADV_NOHUGEPAGE:
338 *vm_flags &= ~VM_HUGEPAGE;
339 *vm_flags |= VM_NOHUGEPAGE;
341 * Setting VM_NOHUGEPAGE will prevent khugepaged from scanning
342 * this vma even if we leave the mm registered in khugepaged if
343 * it got registered before VM_NOHUGEPAGE was set.
351 int __init khugepaged_init(void)
353 mm_slot_cache = kmem_cache_create("khugepaged_mm_slot",
354 sizeof(struct mm_slot),
355 __alignof__(struct mm_slot), 0, NULL);
359 khugepaged_pages_to_scan = HPAGE_PMD_NR * 8;
360 khugepaged_max_ptes_none = HPAGE_PMD_NR - 1;
361 khugepaged_max_ptes_swap = HPAGE_PMD_NR / 8;
366 void __init khugepaged_destroy(void)
368 kmem_cache_destroy(mm_slot_cache);
371 static inline struct mm_slot *alloc_mm_slot(void)
373 if (!mm_slot_cache) /* initialization failed */
375 return kmem_cache_zalloc(mm_slot_cache, GFP_KERNEL);
378 static inline void free_mm_slot(struct mm_slot *mm_slot)
380 kmem_cache_free(mm_slot_cache, mm_slot);
383 static struct mm_slot *get_mm_slot(struct mm_struct *mm)
385 struct mm_slot *mm_slot;
387 hash_for_each_possible(mm_slots_hash, mm_slot, hash, (unsigned long)mm)
388 if (mm == mm_slot->mm)
394 static void insert_to_mm_slots_hash(struct mm_struct *mm,
395 struct mm_slot *mm_slot)
398 hash_add(mm_slots_hash, &mm_slot->hash, (long)mm);
401 static inline int khugepaged_test_exit(struct mm_struct *mm)
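/*
 * mm_users == 0 means the address space is being torn down (exit_mmap);
 * khugepaged only pins the mm_struct itself via mm_count, so once this is
 * true it must stop touching the mm's page tables.
 */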
403 return atomic_read(&mm->mm_users) == 0;
406 static bool hugepage_vma_check(struct vm_area_struct *vma,
407 unsigned long vm_flags)
409 if ((!(vm_flags & VM_HUGEPAGE) && !khugepaged_always()) ||
410 (vm_flags & VM_NOHUGEPAGE) ||
411 test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags))
414 if (shmem_file(vma->vm_file) ||
415 (IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS) &&
417 (vm_flags & VM_DENYWRITE))) {
418 return IS_ALIGNED((vma->vm_start >> PAGE_SHIFT) - vma->vm_pgoff,
421 if (!vma->anon_vma || vma->vm_ops)
423 if (vma_is_temporary_stack(vma))
425 return !(vm_flags & VM_NO_KHUGEPAGED);
428 int __khugepaged_enter(struct mm_struct *mm)
430 struct mm_slot *mm_slot;
433 mm_slot = alloc_mm_slot();
437 /* __khugepaged_exit() must not run from under us */
438 VM_BUG_ON_MM(khugepaged_test_exit(mm), mm);
439 if (unlikely(test_and_set_bit(MMF_VM_HUGEPAGE, &mm->flags))) {
440 free_mm_slot(mm_slot);
444 spin_lock(&khugepaged_mm_lock);
445 insert_to_mm_slots_hash(mm, mm_slot);
447 * Insert just behind the scanning cursor, to let the area settle down.
450 wakeup = list_empty(&khugepaged_scan.mm_head);
451 list_add_tail(&mm_slot->mm_node, &khugepaged_scan.mm_head);
452 spin_unlock(&khugepaged_mm_lock);
456 wake_up_interruptible(&khugepaged_wait);
461 int khugepaged_enter_vma_merge(struct vm_area_struct *vma,
462 unsigned long vm_flags)
464 unsigned long hstart, hend;
467 * For non-shmem files, khugepaged only supports read-only mappings.
468 * khugepaged does not yet work on special mappings. And
469 * file-private shmem THP is not supported.
471 if (!hugepage_vma_check(vma, vm_flags))
474 hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
475 hend = vma->vm_end & HPAGE_PMD_MASK;
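/*
 * hstart is vm_start rounded up and hend is vm_end rounded down to a huge
 * page boundary; the vma is only worth registering if it covers at least
 * one fully aligned HPAGE_PMD_SIZE range (hstart < hend).
 */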
477 return khugepaged_enter(vma, vm_flags);
481 void __khugepaged_exit(struct mm_struct *mm)
483 struct mm_slot *mm_slot;
486 spin_lock(&khugepaged_mm_lock);
487 mm_slot = get_mm_slot(mm);
488 if (mm_slot && khugepaged_scan.mm_slot != mm_slot) {
489 hash_del(&mm_slot->hash);
490 list_del(&mm_slot->mm_node);
493 spin_unlock(&khugepaged_mm_lock);
496 clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
497 free_mm_slot(mm_slot);
499 } else if (mm_slot) {
501 * This is required to serialize against
502 * khugepaged_test_exit() (which is guaranteed to run
503 * under mmap_sem read mode). Stop here (after we
504 * return, all pagetables will be destroyed) until
505 * khugepaged has finished working on the pagetables
506 * under the mmap_sem.
508 down_write(&mm->mmap_sem);
509 up_write(&mm->mmap_sem);
513 static void release_pte_page(struct page *page)
515 dec_node_page_state(page, NR_ISOLATED_ANON + page_is_file_lru(page));
517 putback_lru_page(page);
520 static void release_pte_pages(pte_t *pte, pte_t *_pte)
522 while (--_pte >= pte) {
523 pte_t pteval = *_pte;
524 if (!pte_none(pteval) && !is_zero_pfn(pte_pfn(pteval)))
525 release_pte_page(pte_page(pteval));
529 static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
530 unsigned long address,
533 struct page *page = NULL;
535 int none_or_zero = 0, result = 0, referenced = 0;
536 bool writable = false;
538 for (_pte = pte; _pte < pte+HPAGE_PMD_NR;
539 _pte++, address += PAGE_SIZE) {
540 pte_t pteval = *_pte;
541 if (pte_none(pteval) || (pte_present(pteval) &&
542 is_zero_pfn(pte_pfn(pteval)))) {
543 if (!userfaultfd_armed(vma) &&
544 ++none_or_zero <= khugepaged_max_ptes_none) {
547 result = SCAN_EXCEED_NONE_PTE;
551 if (!pte_present(pteval)) {
552 result = SCAN_PTE_NON_PRESENT;
555 page = vm_normal_page(vma, address, pteval);
556 if (unlikely(!page)) {
557 result = SCAN_PAGE_NULL;
561 /* TODO: teach khugepaged to collapse THP mapped with pte */
562 if (PageCompound(page)) {
563 result = SCAN_PAGE_COMPOUND;
567 VM_BUG_ON_PAGE(!PageAnon(page), page);
570 * We can do it before isolate_lru_page because the
571 * page can't be freed from under us. NOTE: PG_lock
572 * is needed to serialize against split_huge_page
573 * when invoked from the VM.
575 if (!trylock_page(page)) {
576 result = SCAN_PAGE_LOCK;
581 * cannot use mapcount: can't collapse if there's a gup pin.
582 * The page must only be referenced by the scanned process
583 * and page swap cache.
585 if (page_count(page) != 1 + PageSwapCache(page)) {
587 result = SCAN_PAGE_COUNT;
590 if (pte_write(pteval)) {
593 if (PageSwapCache(page) &&
594 !reuse_swap_page(page, NULL)) {
596 result = SCAN_SWAP_CACHE_PAGE;
600 * Page is not in the swap cache. It can be collapsed into a THP.
606 * Isolate the page to avoid collapsing a hugepage
607 * currently in use by the VM.
609 if (isolate_lru_page(page)) {
611 result = SCAN_DEL_PAGE_LRU;
614 inc_node_page_state(page,
615 NR_ISOLATED_ANON + page_is_file_lru(page));
616 VM_BUG_ON_PAGE(!PageLocked(page), page);
617 VM_BUG_ON_PAGE(PageLRU(page), page);
619 /* There should be enough young ptes to collapse the page */
620 if (pte_young(pteval) ||
621 page_is_young(page) || PageReferenced(page) ||
622 mmu_notifier_test_young(vma->vm_mm, address))
625 if (likely(writable)) {
626 if (likely(referenced)) {
627 result = SCAN_SUCCEED;
628 trace_mm_collapse_huge_page_isolate(page, none_or_zero,
629 referenced, writable, result);
633 result = SCAN_PAGE_RO;
637 release_pte_pages(pte, _pte);
638 trace_mm_collapse_huge_page_isolate(page, none_or_zero,
639 referenced, writable, result);
643 static void __collapse_huge_page_copy(pte_t *pte, struct page *page,
644 struct vm_area_struct *vma,
645 unsigned long address,
649 for (_pte = pte; _pte < pte + HPAGE_PMD_NR;
650 _pte++, page++, address += PAGE_SIZE) {
651 pte_t pteval = *_pte;
652 struct page *src_page;
654 if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) {
655 clear_user_highpage(page, address);
656 add_mm_counter(vma->vm_mm, MM_ANONPAGES, 1);
657 if (is_zero_pfn(pte_pfn(pteval))) {
659 * ptl mostly unnecessary.
663 * paravirt calls inside pte_clear here are superfluous.
666 pte_clear(vma->vm_mm, address, _pte);
670 src_page = pte_page(pteval);
671 copy_user_highpage(page, src_page, address, vma);
672 VM_BUG_ON_PAGE(page_mapcount(src_page) != 1, src_page);
673 release_pte_page(src_page);
675 * ptl mostly unnecessary, but preempt has to
676 * be disabled to update the per-cpu stats
677 * inside page_remove_rmap().
681 * paravirt calls inside pte_clear here are superfluous.
684 pte_clear(vma->vm_mm, address, _pte);
685 page_remove_rmap(src_page, false);
687 free_page_and_swap_cache(src_page);
692 static void khugepaged_alloc_sleep(void)
696 add_wait_queue(&khugepaged_wait, &wait);
697 freezable_schedule_timeout_interruptible(
698 msecs_to_jiffies(khugepaged_alloc_sleep_millisecs));
699 remove_wait_queue(&khugepaged_wait, &wait);
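/*
 * Per-scan histogram of which NUMA nodes the scanned pages live on. It is
 * zeroed at the start of each pmd/file scan and later consulted by
 * khugepaged_scan_abort() and khugepaged_find_target_node() to pick the
 * node the huge page is allocated from.
 */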
702 static int khugepaged_node_load[MAX_NUMNODES];
704 static bool khugepaged_scan_abort(int nid)
709 * If node_reclaim_mode is disabled, then no extra effort is made to
710 * allocate memory locally.
712 if (!node_reclaim_mode)
715 /* If there is a count for this node already, it must be acceptable */
716 if (khugepaged_node_load[nid])
719 for (i = 0; i < MAX_NUMNODES; i++) {
720 if (!khugepaged_node_load[i])
722 if (node_distance(nid, i) > node_reclaim_distance)
728 /* Defrag for khugepaged will enter direct reclaim/compaction if necessary */
729 static inline gfp_t alloc_hugepage_khugepaged_gfpmask(void)
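/*
 * GFP_TRANSHUGE is GFP_TRANSHUGE_LIGHT plus __GFP_DIRECT_RECLAIM, so the
 * "defrag" knob decides whether khugepaged may stall in direct
 * reclaim/compaction while allocating the huge page.
 */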
731 return khugepaged_defrag() ? GFP_TRANSHUGE : GFP_TRANSHUGE_LIGHT;
735 static int khugepaged_find_target_node(void)
737 static int last_khugepaged_target_node = NUMA_NO_NODE;
738 int nid, target_node = 0, max_value = 0;
740 /* find first node with max normal pages hit */
741 for (nid = 0; nid < MAX_NUMNODES; nid++)
742 if (khugepaged_node_load[nid] > max_value) {
743 max_value = khugepaged_node_load[nid];
747 /* do some balancing if several nodes have the same hit record */
748 if (target_node <= last_khugepaged_target_node)
749 for (nid = last_khugepaged_target_node + 1; nid < MAX_NUMNODES;
751 if (max_value == khugepaged_node_load[nid]) {
756 last_khugepaged_target_node = target_node;
760 static bool khugepaged_prealloc_page(struct page **hpage, bool *wait)
762 if (IS_ERR(*hpage)) {
768 khugepaged_alloc_sleep();
778 khugepaged_alloc_page(struct page **hpage, gfp_t gfp, int node)
780 VM_BUG_ON_PAGE(*hpage, *hpage);
782 *hpage = __alloc_pages_node(node, gfp, HPAGE_PMD_ORDER);
783 if (unlikely(!*hpage)) {
784 count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
785 *hpage = ERR_PTR(-ENOMEM);
789 prep_transhuge_page(*hpage);
790 count_vm_event(THP_COLLAPSE_ALLOC);
794 static int khugepaged_find_target_node(void)
799 static inline struct page *alloc_khugepaged_hugepage(void)
803 page = alloc_pages(alloc_hugepage_khugepaged_gfpmask(),
806 prep_transhuge_page(page);
810 static struct page *khugepaged_alloc_hugepage(bool *wait)
815 hpage = alloc_khugepaged_hugepage();
817 count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
822 khugepaged_alloc_sleep();
824 count_vm_event(THP_COLLAPSE_ALLOC);
825 } while (unlikely(!hpage) && likely(khugepaged_enabled()));
830 static bool khugepaged_prealloc_page(struct page **hpage, bool *wait)
833 *hpage = khugepaged_alloc_hugepage(wait);
835 if (unlikely(!*hpage))
842 khugepaged_alloc_page(struct page **hpage, gfp_t gfp, int node)
851 * If mmap_sem was temporarily dropped, revalidate the vma
852 * before taking mmap_sem again.
853 * Return 0 if it succeeds, otherwise return non-zero.
857 static int hugepage_vma_revalidate(struct mm_struct *mm, unsigned long address,
858 struct vm_area_struct **vmap)
860 struct vm_area_struct *vma;
861 unsigned long hstart, hend;
863 if (unlikely(khugepaged_test_exit(mm)))
864 return SCAN_ANY_PROCESS;
866 *vmap = vma = find_vma(mm, address);
868 return SCAN_VMA_NULL;
870 hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
871 hend = vma->vm_end & HPAGE_PMD_MASK;
872 if (address < hstart || address + HPAGE_PMD_SIZE > hend)
873 return SCAN_ADDRESS_RANGE;
874 if (!hugepage_vma_check(vma, vma->vm_flags))
875 return SCAN_VMA_CHECK;
880 * Bring missing pages in from swap, to complete THP collapse.
881 * Only done if khugepaged_scan_pmd believes it is worthwhile.
883 * Called and returns without pte mapped or spinlocks held,
884 * but with mmap_sem held to protect against vma changes.
887 static bool __collapse_huge_page_swapin(struct mm_struct *mm,
888 struct vm_area_struct *vma,
889 unsigned long address, pmd_t *pmd,
894 struct vm_fault vmf = {
897 .flags = FAULT_FLAG_ALLOW_RETRY,
899 .pgoff = linear_page_index(vma, address),
902 vmf.pte = pte_offset_map(pmd, address);
903 for (; vmf.address < address + HPAGE_PMD_NR*PAGE_SIZE;
904 vmf.pte++, vmf.address += PAGE_SIZE) {
905 vmf.orig_pte = *vmf.pte;
906 if (!is_swap_pte(vmf.orig_pte))
909 ret = do_swap_page(&vmf);
911 /* do_swap_page returns VM_FAULT_RETRY with released mmap_sem */
912 if (ret & VM_FAULT_RETRY) {
913 down_read(&mm->mmap_sem);
914 if (hugepage_vma_revalidate(mm, address, &vmf.vma)) {
915 /* vma is no longer available, don't continue to swapin */
916 trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
919 /* check if the pmd is still valid */
920 if (mm_find_pmd(mm, address) != pmd) {
921 trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
925 if (ret & VM_FAULT_ERROR) {
926 trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
929 /* pte is unmapped now, we need to map it */
930 vmf.pte = pte_offset_map(pmd, vmf.address);
934 trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 1);
938 static void collapse_huge_page(struct mm_struct *mm,
939 unsigned long address,
941 int node, int referenced, int unmapped)
946 struct page *new_page;
947 spinlock_t *pmd_ptl, *pte_ptl;
948 int isolated = 0, result = 0;
949 struct mem_cgroup *memcg;
950 struct vm_area_struct *vma;
951 struct mmu_notifier_range range;
954 VM_BUG_ON(address & ~HPAGE_PMD_MASK);
956 /* Only allocate from the target node */
957 gfp = alloc_hugepage_khugepaged_gfpmask() | __GFP_THISNODE;
960 * Before allocating the hugepage, release the mmap_sem read lock.
961 * The allocation can take potentially a long time if it involves
962 * sync compaction, and we do not need to hold the mmap_sem during
963 * that. We will recheck the vma after taking it again in write mode.
965 up_read(&mm->mmap_sem);
966 new_page = khugepaged_alloc_page(hpage, gfp, node);
968 result = SCAN_ALLOC_HUGE_PAGE_FAIL;
972 if (unlikely(mem_cgroup_try_charge(new_page, mm, gfp, &memcg, true))) {
973 result = SCAN_CGROUP_CHARGE_FAIL;
977 down_read(&mm->mmap_sem);
978 result = hugepage_vma_revalidate(mm, address, &vma);
980 mem_cgroup_cancel_charge(new_page, memcg, true);
981 up_read(&mm->mmap_sem);
985 pmd = mm_find_pmd(mm, address);
987 result = SCAN_PMD_NULL;
988 mem_cgroup_cancel_charge(new_page, memcg, true);
989 up_read(&mm->mmap_sem);
994 * __collapse_huge_page_swapin always returns with mmap_sem locked.
995 * If it fails, we release mmap_sem and jump out_nolock.
996 * Continuing to collapse causes inconsistency.
998 if (unmapped && !__collapse_huge_page_swapin(mm, vma, address,
1000 mem_cgroup_cancel_charge(new_page, memcg, true);
1001 up_read(&mm->mmap_sem);
1005 up_read(&mm->mmap_sem);
1007 * Prevent all access to the pagetables, with the exception of
1008 * gup_fast (handled later by the ptep_clear_flush) and the VM
1009 * (handled by the anon_vma lock + PG_lock).
1011 down_write(&mm->mmap_sem);
1012 result = SCAN_ANY_PROCESS;
1013 if (!mmget_still_valid(mm))
1015 result = hugepage_vma_revalidate(mm, address, &vma);
1018 /* check if the pmd is still valid */
1019 if (mm_find_pmd(mm, address) != pmd)
1022 anon_vma_lock_write(vma->anon_vma);
1024 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, NULL, mm,
1025 address, address + HPAGE_PMD_SIZE);
1026 mmu_notifier_invalidate_range_start(&range);
1028 pte = pte_offset_map(pmd, address);
1029 pte_ptl = pte_lockptr(mm, pmd);
1031 pmd_ptl = pmd_lock(mm, pmd); /* probably unnecessary */
1033 * After this gup_fast can't run anymore. This also removes
1034 * any huge TLB entry from the CPU so we won't allow
1035 * huge and small TLB entries for the same virtual address
1036 * to avoid the risk of CPU bugs in that area.
1038 _pmd = pmdp_collapse_flush(vma, address, pmd);
1039 spin_unlock(pmd_ptl);
1040 mmu_notifier_invalidate_range_end(&range);
1043 isolated = __collapse_huge_page_isolate(vma, address, pte);
1044 spin_unlock(pte_ptl);
1046 if (unlikely(!isolated)) {
1049 BUG_ON(!pmd_none(*pmd));
1051 * We can only use set_pmd_at when establishing
1052 * hugepmds and never for establishing regular pmds that
1053 * point to regular pagetables. Use pmd_populate for that.
1055 pmd_populate(mm, pmd, pmd_pgtable(_pmd));
1056 spin_unlock(pmd_ptl);
1057 anon_vma_unlock_write(vma->anon_vma);
1063 * All pages are isolated and locked so anon_vma rmap
1064 * can't run anymore.
1066 anon_vma_unlock_write(vma->anon_vma);
1068 __collapse_huge_page_copy(pte, new_page, vma, address, pte_ptl);
1070 __SetPageUptodate(new_page);
1071 pgtable = pmd_pgtable(_pmd);
1073 _pmd = mk_huge_pmd(new_page, vma->vm_page_prot);
1074 _pmd = maybe_pmd_mkwrite(pmd_mkdirty(_pmd), vma);
1077 * spin_lock() below is not the equivalent of smp_wmb(), so
1078 * this is needed to prevent the copy_huge_page writes from becoming
1079 * visible after the set_pmd_at() write.
1084 BUG_ON(!pmd_none(*pmd));
1085 page_add_new_anon_rmap(new_page, vma, address, true);
1086 mem_cgroup_commit_charge(new_page, memcg, false, true);
1087 count_memcg_events(memcg, THP_COLLAPSE_ALLOC, 1);
1088 lru_cache_add_active_or_unevictable(new_page, vma);
1089 pgtable_trans_huge_deposit(mm, pmd, pgtable);
1090 set_pmd_at(mm, address, pmd, _pmd);
1091 update_mmu_cache_pmd(vma, address, pmd);
1092 spin_unlock(pmd_ptl);
1096 khugepaged_pages_collapsed++;
1097 result = SCAN_SUCCEED;
1099 up_write(&mm->mmap_sem);
1101 trace_mm_collapse_huge_page(mm, isolated, result);
1104 mem_cgroup_cancel_charge(new_page, memcg, true);
1108 static int khugepaged_scan_pmd(struct mm_struct *mm,
1109 struct vm_area_struct *vma,
1110 unsigned long address,
1111 struct page **hpage)
1115 int ret = 0, none_or_zero = 0, result = 0, referenced = 0;
1116 struct page *page = NULL;
1117 unsigned long _address;
1119 int node = NUMA_NO_NODE, unmapped = 0;
1120 bool writable = false;
1122 VM_BUG_ON(address & ~HPAGE_PMD_MASK);
1124 pmd = mm_find_pmd(mm, address);
1126 result = SCAN_PMD_NULL;
1130 memset(khugepaged_node_load, 0, sizeof(khugepaged_node_load));
1131 pte = pte_offset_map_lock(mm, pmd, address, &ptl);
1132 for (_address = address, _pte = pte; _pte < pte+HPAGE_PMD_NR;
1133 _pte++, _address += PAGE_SIZE) {
1134 pte_t pteval = *_pte;
1135 if (is_swap_pte(pteval)) {
1136 if (++unmapped <= khugepaged_max_ptes_swap) {
1138 * Always be strict with uffd-wp
1139 * enabled swap entries. Please see
1140 * comment below for pte_uffd_wp().
1142 if (pte_swp_uffd_wp(pteval)) {
1143 result = SCAN_PTE_UFFD_WP;
1148 result = SCAN_EXCEED_SWAP_PTE;
1152 if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) {
1153 if (!userfaultfd_armed(vma) &&
1154 ++none_or_zero <= khugepaged_max_ptes_none) {
1157 result = SCAN_EXCEED_NONE_PTE;
1161 if (!pte_present(pteval)) {
1162 result = SCAN_PTE_NON_PRESENT;
1165 if (pte_uffd_wp(pteval)) {
1167 * Don't collapse the page if any of the small
1168 * PTEs are armed with uffd write protection.
1169 * Here we can also mark the new huge pmd as
1170 * write protected if any of the small ones is
1171 * marked but that could bring unknown
1172 * userfault messages that fall outside of
1173 * the registered range. So, just be simple.
1175 result = SCAN_PTE_UFFD_WP;
1178 if (pte_write(pteval))
1181 page = vm_normal_page(vma, _address, pteval);
1182 if (unlikely(!page)) {
1183 result = SCAN_PAGE_NULL;
1187 /* TODO: teach khugepaged to collapse THP mapped with pte */
1188 if (PageCompound(page)) {
1189 result = SCAN_PAGE_COMPOUND;
1194 * Record which node the original page is from and save this
1195 * information to khugepaged_node_load[].
1196 * Khugepaged will allocate the hugepage from the node that has the max
1199 node = page_to_nid(page);
1200 if (khugepaged_scan_abort(node)) {
1201 result = SCAN_SCAN_ABORT;
1204 khugepaged_node_load[node]++;
1205 if (!PageLRU(page)) {
1206 result = SCAN_PAGE_LRU;
1209 if (PageLocked(page)) {
1210 result = SCAN_PAGE_LOCK;
1213 if (!PageAnon(page)) {
1214 result = SCAN_PAGE_ANON;
1219 * cannot use mapcount: can't collapse if there's a gup pin.
1220 * The page must only be referenced by the scanned process
1221 * and page swap cache.
1223 if (page_count(page) != 1 + PageSwapCache(page)) {
1224 result = SCAN_PAGE_COUNT;
1227 if (pte_young(pteval) ||
1228 page_is_young(page) || PageReferenced(page) ||
1229 mmu_notifier_test_young(vma->vm_mm, address))
1233 result = SCAN_PAGE_RO;
1234 } else if (!referenced || (unmapped && referenced < HPAGE_PMD_NR/2)) {
1235 result = SCAN_LACK_REFERENCED_PAGE;
1237 result = SCAN_SUCCEED;
1241 pte_unmap_unlock(pte, ptl);
1243 node = khugepaged_find_target_node();
1244 /* collapse_huge_page will return with the mmap_sem released */
1245 collapse_huge_page(mm, address, hpage, node,
1246 referenced, unmapped);
1249 trace_mm_khugepaged_scan_pmd(mm, page, writable, referenced,
1250 none_or_zero, result, unmapped);
1254 static void collect_mm_slot(struct mm_slot *mm_slot)
1256 struct mm_struct *mm = mm_slot->mm;
1258 lockdep_assert_held(&khugepaged_mm_lock);
1260 if (khugepaged_test_exit(mm)) {
1262 hash_del(&mm_slot->hash);
1263 list_del(&mm_slot->mm_node);
1266 * Not strictly needed because the mm exited already.
1268 * clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
1271 /* khugepaged_mm_lock actually not necessary for the below */
1272 free_mm_slot(mm_slot);
1279 * Notify khugepaged that the given addr of the mm is a pte-mapped THP. Then
1280 * khugepaged should try to collapse the page table.
1282 static int khugepaged_add_pte_mapped_thp(struct mm_struct *mm,
1285 struct mm_slot *mm_slot;
1287 VM_BUG_ON(addr & ~HPAGE_PMD_MASK);
1289 spin_lock(&khugepaged_mm_lock);
1290 mm_slot = get_mm_slot(mm);
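/*
 * If the per-mm array is already full (MAX_PTE_MAPPED_THP entries), the
 * address is silently dropped and the page table is simply left pte-mapped.
 */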
1291 if (likely(mm_slot && mm_slot->nr_pte_mapped_thp < MAX_PTE_MAPPED_THP))
1292 mm_slot->pte_mapped_thp[mm_slot->nr_pte_mapped_thp++] = addr;
1293 spin_unlock(&khugepaged_mm_lock);
1298 * Try to collapse a pte-mapped THP for mm at address haddr.
1300 * This function checks whether all the PTEs in the PMD are pointing to the
1301 * right THP. If so, retract the page table so the THP can refault in as pmd-mapped.
1304 void collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr)
1306 unsigned long haddr = addr & HPAGE_PMD_MASK;
1307 struct vm_area_struct *vma = find_vma(mm, haddr);
1308 struct page *hpage = NULL;
1309 pte_t *start_pte, *pte;
1315 if (!vma || !vma->vm_file ||
1316 vma->vm_start > haddr || vma->vm_end < haddr + HPAGE_PMD_SIZE)
1320 * This vm_flags may not have VM_HUGEPAGE if the page was not
1321 * collapsed by this mm. But we can still collapse if the page is
1322 * a valid THP. Add extra VM_HUGEPAGE so hugepage_vma_check()
1323 * will not fail the vma for missing VM_HUGEPAGE.
1325 if (!hugepage_vma_check(vma, vma->vm_flags | VM_HUGEPAGE))
1328 pmd = mm_find_pmd(mm, haddr);
1332 start_pte = pte_offset_map_lock(mm, pmd, haddr, &ptl);
1334 /* step 1: check all mapped PTEs are to the right huge page */
1335 for (i = 0, addr = haddr, pte = start_pte;
1336 i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE, pte++) {
1339 /* empty pte, skip */
1343 /* page swapped out, abort */
1344 if (!pte_present(*pte))
1347 page = vm_normal_page(vma, addr, *pte);
1349 if (!page || !PageCompound(page))
1353 hpage = compound_head(page);
1355 * The mapping of the THP should not change.
1357 * Note that uprobe, debugger, or MAP_PRIVATE may
1358 * change the page table, but the new page will
1359 * not pass PageCompound() check.
1361 if (WARN_ON(hpage->mapping != vma->vm_file->f_mapping))
1366 * Confirm the page maps to the correct subpage.
1368 * Note that uprobe, debugger, or MAP_PRIVATE may change
1369 * the page table, but the new page will not pass
1370 * PageCompound() check.
1372 if (WARN_ON(hpage + i != page))
1377 /* step 2: adjust rmap */
1378 for (i = 0, addr = haddr, pte = start_pte;
1379 i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE, pte++) {
1384 page = vm_normal_page(vma, addr, *pte);
1385 page_remove_rmap(page, false);
1388 pte_unmap_unlock(start_pte, ptl);
1390 /* step 3: set proper refcount and mm_counters. */
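/*
 * "count" is the number of PTEs that were found mapping subpages of the
 * THP in step 1, so the compound head loses that many references and the
 * file-backed rss counter drops by the same amount.
 */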
1392 page_ref_sub(hpage, count);
1393 add_mm_counter(vma->vm_mm, mm_counter_file(hpage), -count);
1396 /* step 4: collapse pmd */
1397 ptl = pmd_lock(vma->vm_mm, pmd);
1398 _pmd = pmdp_collapse_flush(vma, addr, pmd);
1401 pte_free(mm, pmd_pgtable(_pmd));
1405 pte_unmap_unlock(start_pte, ptl);
1408 static int khugepaged_collapse_pte_mapped_thps(struct mm_slot *mm_slot)
1410 struct mm_struct *mm = mm_slot->mm;
1413 if (likely(mm_slot->nr_pte_mapped_thp == 0))
1416 if (!down_write_trylock(&mm->mmap_sem))
1419 if (unlikely(khugepaged_test_exit(mm)))
1422 for (i = 0; i < mm_slot->nr_pte_mapped_thp; i++)
1423 collapse_pte_mapped_thp(mm, mm_slot->pte_mapped_thp[i]);
1426 mm_slot->nr_pte_mapped_thp = 0;
1427 up_write(&mm->mmap_sem);
1431 static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff)
1433 struct vm_area_struct *vma;
1437 i_mmap_lock_write(mapping);
1438 vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
1440 * Check vma->anon_vma to exclude MAP_PRIVATE mappings that
1441 * got written to. These VMAs are likely not worth the cost of
1442 * down_write(mmap_sem), as the PMD mapping is likely to be split later.
1445 * Note that the vma->anon_vma check is racy: it can be set up after
1446 * the check but before we took mmap_sem by the fault path.
1447 * But page lock would prevent establishing any new ptes of the
1448 * page, so we are safe.
1450 * An alternative would be drop the check, but check that page
1451 * table is clear before calling pmdp_collapse_flush() under
1452 * ptl. It has higher chance to recover THP for the VMA, but
1453 * has higher cost too.
1457 addr = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
1458 if (addr & ~HPAGE_PMD_MASK)
1460 if (vma->vm_end < addr + HPAGE_PMD_SIZE)
1462 pmd = mm_find_pmd(vma->vm_mm, addr);
1466 * We need exclusive mmap_sem to retract the page table.
1468 * We use trylock due to lock inversion: we need to acquire
1469 * mmap_sem while holding page lock. Fault path does it in
1470 * reverse order. Trylock is a way to avoid deadlock.
1472 if (down_write_trylock(&vma->vm_mm->mmap_sem)) {
1473 spinlock_t *ptl = pmd_lock(vma->vm_mm, pmd);
1474 /* assume page table is clear */
1475 _pmd = pmdp_collapse_flush(vma, addr, pmd);
1477 up_write(&vma->vm_mm->mmap_sem);
1478 mm_dec_nr_ptes(vma->vm_mm);
1479 pte_free(vma->vm_mm, pmd_pgtable(_pmd));
1481 /* Try again later */
1482 khugepaged_add_pte_mapped_thp(vma->vm_mm, addr);
1485 i_mmap_unlock_write(mapping);
1489 * collapse_file - collapse filemap/tmpfs/shmem pages into a huge one.
1491 * Basic scheme is simple, details are more complex:
1492 * - allocate and lock a new huge page;
1493 * - scan page cache replacing old pages with the new one
1494 * + swap/gup in pages if necessary;
1496 * + keep old pages around in case rollback is required;
1497 * - if replacing succeeds:
1500 * + unlock huge page;
1501 * - if replacing failed:
1502 * + put all pages back and unfreeze them;
1503 * + restore gaps in the page cache;
1504 * + unlock and free huge page;
1506 static void collapse_file(struct mm_struct *mm,
1507 struct file *file, pgoff_t start,
1508 struct page **hpage, int node)
1510 struct address_space *mapping = file->f_mapping;
1512 struct page *new_page;
1513 struct mem_cgroup *memcg;
1514 pgoff_t index, end = start + HPAGE_PMD_NR;
1515 LIST_HEAD(pagelist);
1516 XA_STATE_ORDER(xas, &mapping->i_pages, start, HPAGE_PMD_ORDER);
1517 int nr_none = 0, result = SCAN_SUCCEED;
1518 bool is_shmem = shmem_file(file);
1520 VM_BUG_ON(!IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS) && !is_shmem);
1521 VM_BUG_ON(start & (HPAGE_PMD_NR - 1));
1523 /* Only allocate from the target node */
1524 gfp = alloc_hugepage_khugepaged_gfpmask() | __GFP_THISNODE;
1526 new_page = khugepaged_alloc_page(hpage, gfp, node);
1528 result = SCAN_ALLOC_HUGE_PAGE_FAIL;
1532 if (unlikely(mem_cgroup_try_charge(new_page, mm, gfp, &memcg, true))) {
1533 result = SCAN_CGROUP_CHARGE_FAIL;
1537 /* This will be less messy when we use multi-index entries */
1540 xas_create_range(&xas);
1541 if (!xas_error(&xas))
1543 xas_unlock_irq(&xas);
1544 if (!xas_nomem(&xas, GFP_KERNEL)) {
1545 mem_cgroup_cancel_charge(new_page, memcg, true);
1551 __SetPageLocked(new_page);
1553 __SetPageSwapBacked(new_page);
1554 new_page->index = start;
1555 new_page->mapping = mapping;
1558 * At this point the new_page is locked and not up-to-date.
1559 * It's safe to insert it into the page cache, because nobody would
1560 * be able to map it or use it in another way until we unlock it.
1563 xas_set(&xas, start);
1564 for (index = start; index < end; index++) {
1565 struct page *page = xas_next(&xas);
1567 VM_BUG_ON(index != xas.xa_index);
1571 * Stop if extent has been truncated or
1572 * hole-punched, and is now completely empty.
1575 if (index == start) {
1576 if (!xas_next_entry(&xas, end - 1)) {
1577 result = SCAN_TRUNCATED;
1580 xas_set(&xas, index);
1582 if (!shmem_charge(mapping->host, 1)) {
1586 xas_store(&xas, new_page);
1591 if (xa_is_value(page) || !PageUptodate(page)) {
1592 xas_unlock_irq(&xas);
1593 /* swap in or instantiate fallocated page */
1594 if (shmem_getpage(mapping->host, index, &page,
1599 } else if (trylock_page(page)) {
1601 xas_unlock_irq(&xas);
1603 result = SCAN_PAGE_LOCK;
1606 } else { /* !is_shmem */
1607 if (!page || xa_is_value(page)) {
1608 xas_unlock_irq(&xas);
1609 page_cache_sync_readahead(mapping, &file->f_ra,
1612 /* drain pagevecs to help isolate_lru_page() */
1614 page = find_lock_page(mapping, index);
1615 if (unlikely(page == NULL)) {
1619 } else if (PageDirty(page)) {
1621 * khugepaged only works on read-only fd,
1622 * so this page is dirty because it hasn't
1623 * been flushed since first write. There
1624 * won't be new dirty pages.
1626 * Trigger async flush here and hope the
1627 * writeback is done when khugepaged
1628 * revisits this page.
1630 * This is a one-off situation. We are not
1631 * forcing writeback in a loop.
1633 xas_unlock_irq(&xas);
1634 filemap_flush(mapping);
1637 } else if (trylock_page(page)) {
1639 xas_unlock_irq(&xas);
1641 result = SCAN_PAGE_LOCK;
1647 * The page must be locked, so we can drop the i_pages lock
1648 * without racing with truncate.
1650 VM_BUG_ON_PAGE(!PageLocked(page), page);
1652 /* make sure the page is up to date */
1653 if (unlikely(!PageUptodate(page))) {
1659 * If the file was truncated then extended, or hole-punched, before
1660 * we locked the first page, then a THP might be there already.
1662 if (PageTransCompound(page)) {
1663 result = SCAN_PAGE_COMPOUND;
1667 if (page_mapping(page) != mapping) {
1668 result = SCAN_TRUNCATED;
1672 if (!is_shmem && PageDirty(page)) {
1674 * khugepaged only works on read-only fd, so this
1675 * page is dirty because it hasn't been flushed
1676 * since first write.
1682 if (isolate_lru_page(page)) {
1683 result = SCAN_DEL_PAGE_LRU;
1687 if (page_has_private(page) &&
1688 !try_to_release_page(page, GFP_KERNEL)) {
1689 result = SCAN_PAGE_HAS_PRIVATE;
1690 putback_lru_page(page);
1694 if (page_mapped(page))
1695 unmap_mapping_pages(mapping, index, 1, false);
1698 xas_set(&xas, index);
1700 VM_BUG_ON_PAGE(page != xas_load(&xas), page);
1701 VM_BUG_ON_PAGE(page_mapped(page), page);
1704 * The page is expected to have page_count() == 3:
1705 * - we hold a pin on it;
1706 * - one reference from page cache;
1707 * - one from isolate_lru_page;
1709 if (!page_ref_freeze(page, 3)) {
1710 result = SCAN_PAGE_COUNT;
1711 xas_unlock_irq(&xas);
1712 putback_lru_page(page);
1717 * Add the page to the list to be able to undo the collapse if
1718 * something goes wrong.
1720 list_add_tail(&page->lru, &pagelist);
1722 /* Finally, replace with the new page. */
1723 xas_store(&xas, new_page);
1732 __inc_node_page_state(new_page, NR_SHMEM_THPS);
1734 __inc_node_page_state(new_page, NR_FILE_THPS);
1735 filemap_nr_thps_inc(mapping);
1739 struct zone *zone = page_zone(new_page);
1741 __mod_node_page_state(zone->zone_pgdat, NR_FILE_PAGES, nr_none);
1743 __mod_node_page_state(zone->zone_pgdat,
1748 xas_unlock_irq(&xas);
1751 if (result == SCAN_SUCCEED) {
1752 struct page *page, *tmp;
1755 * Replacing the old pages with the new one has succeeded; now we
1756 * need to copy the content and free the old pages.
1759 list_for_each_entry_safe(page, tmp, &pagelist, lru) {
1760 while (index < page->index) {
1761 clear_highpage(new_page + (index % HPAGE_PMD_NR));
1764 copy_highpage(new_page + (page->index % HPAGE_PMD_NR),
1766 list_del(&page->lru);
1767 page->mapping = NULL;
1768 page_ref_unfreeze(page, 1);
1769 ClearPageActive(page);
1770 ClearPageUnevictable(page);
1775 while (index < end) {
1776 clear_highpage(new_page + (index % HPAGE_PMD_NR));
1780 SetPageUptodate(new_page);
1781 page_ref_add(new_page, HPAGE_PMD_NR - 1);
1782 mem_cgroup_commit_charge(new_page, memcg, false, true);
1785 set_page_dirty(new_page);
1786 lru_cache_add_anon(new_page);
1788 lru_cache_add_file(new_page);
1790 count_memcg_events(memcg, THP_COLLAPSE_ALLOC, 1);
1793 * Remove pte page tables, so we can re-fault the page as huge.
1795 retract_page_tables(mapping, start);
1798 khugepaged_pages_collapsed++;
1802 /* Something went wrong: roll back page cache changes */
1804 mapping->nrpages -= nr_none;
1807 shmem_uncharge(mapping->host, nr_none);
1809 xas_set(&xas, start);
1810 xas_for_each(&xas, page, end - 1) {
1811 page = list_first_entry_or_null(&pagelist,
1813 if (!page || xas.xa_index < page->index) {
1817 /* Put holes back where they were */
1818 xas_store(&xas, NULL);
1822 VM_BUG_ON_PAGE(page->index != xas.xa_index, page);
1824 /* Unfreeze the page. */
1825 list_del(&page->lru);
1826 page_ref_unfreeze(page, 2);
1827 xas_store(&xas, page);
1829 xas_unlock_irq(&xas);
1831 putback_lru_page(page);
1835 xas_unlock_irq(&xas);
1837 mem_cgroup_cancel_charge(new_page, memcg, true);
1838 new_page->mapping = NULL;
1841 unlock_page(new_page);
1843 VM_BUG_ON(!list_empty(&pagelist));
1844 /* TODO: tracepoints */
1847 static void khugepaged_scan_file(struct mm_struct *mm,
1848 struct file *file, pgoff_t start, struct page **hpage)
1850 struct page *page = NULL;
1851 struct address_space *mapping = file->f_mapping;
1852 XA_STATE(xas, &mapping->i_pages, start);
1854 int node = NUMA_NO_NODE;
1855 int result = SCAN_SUCCEED;
1859 memset(khugepaged_node_load, 0, sizeof(khugepaged_node_load));
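/*
 * The walk below only samples the current page cache state without taking
 * page locks; collapse_file() re-checks every page under the page lock and
 * i_pages lock before actually replacing it.
 */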
1861 xas_for_each(&xas, page, start + HPAGE_PMD_NR - 1) {
1862 if (xas_retry(&xas, page))
1865 if (xa_is_value(page)) {
1866 if (++swap > khugepaged_max_ptes_swap) {
1867 result = SCAN_EXCEED_SWAP_PTE;
1873 if (PageTransCompound(page)) {
1874 result = SCAN_PAGE_COMPOUND;
1878 node = page_to_nid(page);
1879 if (khugepaged_scan_abort(node)) {
1880 result = SCAN_SCAN_ABORT;
1883 khugepaged_node_load[node]++;
1885 if (!PageLRU(page)) {
1886 result = SCAN_PAGE_LRU;
1890 if (page_count(page) !=
1891 1 + page_mapcount(page) + page_has_private(page)) {
1892 result = SCAN_PAGE_COUNT;
1897 * We probably should check if the page is referenced here, but
1898 * nobody would transfer pte_young() to PageReferenced() for us.
1899 * And rmap walk here is just too costly...
1904 if (need_resched()) {
1911 if (result == SCAN_SUCCEED) {
1912 if (present < HPAGE_PMD_NR - khugepaged_max_ptes_none) {
1913 result = SCAN_EXCEED_NONE_PTE;
1915 node = khugepaged_find_target_node();
1916 collapse_file(mm, file, start, hpage, node);
1920 /* TODO: tracepoints */
1923 static void khugepaged_scan_file(struct mm_struct *mm,
1924 struct file *file, pgoff_t start, struct page **hpage)
1929 static int khugepaged_collapse_pte_mapped_thps(struct mm_slot *mm_slot)
1935 static unsigned int khugepaged_scan_mm_slot(unsigned int pages,
1936 struct page **hpage)
1937 __releases(&khugepaged_mm_lock)
1938 __acquires(&khugepaged_mm_lock)
1940 struct mm_slot *mm_slot;
1941 struct mm_struct *mm;
1942 struct vm_area_struct *vma;
1946 lockdep_assert_held(&khugepaged_mm_lock);
1948 if (khugepaged_scan.mm_slot)
1949 mm_slot = khugepaged_scan.mm_slot;
1951 mm_slot = list_entry(khugepaged_scan.mm_head.next,
1952 struct mm_slot, mm_node);
1953 khugepaged_scan.address = 0;
1954 khugepaged_scan.mm_slot = mm_slot;
1956 spin_unlock(&khugepaged_mm_lock);
1957 khugepaged_collapse_pte_mapped_thps(mm_slot);
1961 * Don't wait for semaphore (to avoid long wait times). Just move to
1962 * the next mm on the list.
1965 if (unlikely(!down_read_trylock(&mm->mmap_sem)))
1966 goto breakouterloop_mmap_sem;
1967 if (likely(!khugepaged_test_exit(mm)))
1968 vma = find_vma(mm, khugepaged_scan.address);
1971 for (; vma; vma = vma->vm_next) {
1972 unsigned long hstart, hend;
1975 if (unlikely(khugepaged_test_exit(mm))) {
1979 if (!hugepage_vma_check(vma, vma->vm_flags)) {
1984 hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
1985 hend = vma->vm_end & HPAGE_PMD_MASK;
1988 if (khugepaged_scan.address > hend)
1990 if (khugepaged_scan.address < hstart)
1991 khugepaged_scan.address = hstart;
1992 VM_BUG_ON(khugepaged_scan.address & ~HPAGE_PMD_MASK);
1993 if (shmem_file(vma->vm_file) && !shmem_huge_enabled(vma))
1996 while (khugepaged_scan.address < hend) {
1999 if (unlikely(khugepaged_test_exit(mm)))
2000 goto breakouterloop;
2002 VM_BUG_ON(khugepaged_scan.address < hstart ||
2003 khugepaged_scan.address + HPAGE_PMD_SIZE >
2005 if (IS_ENABLED(CONFIG_SHMEM) && vma->vm_file) {
2006 struct file *file = get_file(vma->vm_file);
2007 pgoff_t pgoff = linear_page_index(vma,
2008 khugepaged_scan.address);
2010 up_read(&mm->mmap_sem);
2012 khugepaged_scan_file(mm, file, pgoff, hpage);
2015 ret = khugepaged_scan_pmd(mm, vma,
2016 khugepaged_scan.address,
2019 /* move to next address */
2020 khugepaged_scan.address += HPAGE_PMD_SIZE;
2021 progress += HPAGE_PMD_NR;
2023 /* we released mmap_sem so break out of the loop */
2024 goto breakouterloop_mmap_sem;
2025 if (progress >= pages)
2026 goto breakouterloop;
2030 up_read(&mm->mmap_sem); /* exit_mmap will destroy ptes after this */
2031 breakouterloop_mmap_sem:
2033 spin_lock(&khugepaged_mm_lock);
2034 VM_BUG_ON(khugepaged_scan.mm_slot != mm_slot);
2036 * Release the current mm_slot if this mm is about to die, or
2037 * if we scanned all vmas of this mm.
2039 if (khugepaged_test_exit(mm) || !vma) {
2041 * Make sure that if mm_users is reaching zero while
2042 * khugepaged runs here, khugepaged_exit will find
2043 * mm_slot not pointing to the exiting mm.
2045 if (mm_slot->mm_node.next != &khugepaged_scan.mm_head) {
2046 khugepaged_scan.mm_slot = list_entry(
2047 mm_slot->mm_node.next,
2048 struct mm_slot, mm_node);
2049 khugepaged_scan.address = 0;
2051 khugepaged_scan.mm_slot = NULL;
2052 khugepaged_full_scans++;
2055 collect_mm_slot(mm_slot);
2061 static int khugepaged_has_work(void)
2063 return !list_empty(&khugepaged_scan.mm_head) &&
2064 khugepaged_enabled();
2067 static int khugepaged_wait_event(void)
2069 return !list_empty(&khugepaged_scan.mm_head) ||
2070 kthread_should_stop();
2073 static void khugepaged_do_scan(void)
2075 struct page *hpage = NULL;
2076 unsigned int progress = 0, pass_through_head = 0;
2077 unsigned int pages = khugepaged_pages_to_scan;
2080 barrier(); /* write khugepaged_pages_to_scan to local stack */
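/*
 * Drain the per-CPU LRU pagevecs up front so that pages sit on the real
 * LRU lists and isolate_lru_page() during collapse is more likely to
 * succeed.
 */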
2082 lru_add_drain_all();
2084 while (progress < pages) {
2085 if (!khugepaged_prealloc_page(&hpage, &wait))
2090 if (unlikely(kthread_should_stop() || try_to_freeze()))
2093 spin_lock(&khugepaged_mm_lock);
2094 if (!khugepaged_scan.mm_slot)
2095 pass_through_head++;
2096 if (khugepaged_has_work() &&
2097 pass_through_head < 2)
2098 progress += khugepaged_scan_mm_slot(pages - progress,
2102 spin_unlock(&khugepaged_mm_lock);
2105 if (!IS_ERR_OR_NULL(hpage))
2109 static bool khugepaged_should_wakeup(void)
2111 return kthread_should_stop() ||
2112 time_after_eq(jiffies, khugepaged_sleep_expire);
2115 static void khugepaged_wait_work(void)
2117 if (khugepaged_has_work()) {
2118 const unsigned long scan_sleep_jiffies =
2119 msecs_to_jiffies(khugepaged_scan_sleep_millisecs);
2121 if (!scan_sleep_jiffies)
2124 khugepaged_sleep_expire = jiffies + scan_sleep_jiffies;
2125 wait_event_freezable_timeout(khugepaged_wait,
2126 khugepaged_should_wakeup(),
2127 scan_sleep_jiffies);
2131 if (khugepaged_enabled())
2132 wait_event_freezable(khugepaged_wait, khugepaged_wait_event());
2135 static int khugepaged(void *none)
2137 struct mm_slot *mm_slot;
2140 set_user_nice(current, MAX_NICE);
2142 while (!kthread_should_stop()) {
2143 khugepaged_do_scan();
2144 khugepaged_wait_work();
2147 spin_lock(&khugepaged_mm_lock);
2148 mm_slot = khugepaged_scan.mm_slot;
2149 khugepaged_scan.mm_slot = NULL;
2151 collect_mm_slot(mm_slot);
2152 spin_unlock(&khugepaged_mm_lock);
2156 static void set_recommended_min_free_kbytes(void)
2160 unsigned long recommended_min;
2162 for_each_populated_zone(zone) {
2164 * We don't need to worry about fragmentation of
2165 * ZONE_MOVABLE since it only has movable pages.
2167 if (zone_idx(zone) > gfp_zone(GFP_USER))
2173 /* Ensure 2 pageblocks are free to assist fragmentation avoidance */
2174 recommended_min = pageblock_nr_pages * nr_zones * 2;
2177 * Make sure that on average at least two pageblocks are almost free
2178 * of another type, one for a migratetype to fall back to and a
2179 * second to avoid subsequent fallbacks of other types. There are 3
2180 * MIGRATE_TYPES we care about.
2182 recommended_min += pageblock_nr_pages * nr_zones *
2183 MIGRATE_PCPTYPES * MIGRATE_PCPTYPES;
2185 /* don't ever allow reserving more than 5% of the lowmem */
2186 recommended_min = min(recommended_min,
2187 (unsigned long) nr_free_buffer_pages() / 20);
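/* The shift by (PAGE_SHIFT - 10) converts a page count into kilobytes,
 * since min_free_kbytes is expressed in KiB. */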
2188 recommended_min <<= (PAGE_SHIFT-10);
2190 if (recommended_min > min_free_kbytes) {
2191 if (user_min_free_kbytes >= 0)
2192 pr_info("raising min_free_kbytes from %d to %lu to help transparent hugepage allocations\n",
2193 min_free_kbytes, recommended_min);
2195 min_free_kbytes = recommended_min;
2197 setup_per_zone_wmarks();
2200 int start_stop_khugepaged(void)
2202 static struct task_struct *khugepaged_thread __read_mostly;
2203 static DEFINE_MUTEX(khugepaged_mutex);
2206 mutex_lock(&khugepaged_mutex);
2207 if (khugepaged_enabled()) {
2208 if (!khugepaged_thread)
2209 khugepaged_thread = kthread_run(khugepaged, NULL,
2211 if (IS_ERR(khugepaged_thread)) {
2212 pr_err("khugepaged: kthread_run(khugepaged) failed\n");
2213 err = PTR_ERR(khugepaged_thread);
2214 khugepaged_thread = NULL;
2218 if (!list_empty(&khugepaged_scan.mm_head))
2219 wake_up_interruptible(&khugepaged_wait);
2221 set_recommended_min_free_kbytes();
2222 } else if (khugepaged_thread) {
2223 kthread_stop(khugepaged_thread);
2224 khugepaged_thread = NULL;
2227 mutex_unlock(&khugepaged_mutex);