1 // SPDX-License-Identifier: GPL-2.0
2 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
5 #include <linux/sched.h>
6 #include <linux/sched/mm.h>
7 #include <linux/sched/coredump.h>
8 #include <linux/mmu_notifier.h>
9 #include <linux/rmap.h>
10 #include <linux/swap.h>
11 #include <linux/mm_inline.h>
12 #include <linux/kthread.h>
13 #include <linux/khugepaged.h>
14 #include <linux/freezer.h>
15 #include <linux/mman.h>
16 #include <linux/hashtable.h>
17 #include <linux/userfaultfd_k.h>
18 #include <linux/page_idle.h>
19 #include <linux/swapops.h>
20 #include <linux/shmem_fs.h>
23 #include <asm/pgalloc.h>
24 #ifdef CONFIG_FINEGRAINED_THP
25 #include <asm/finegrained_thp.h>
26 #include <asm/huge_mm.h>
28 #include <asm-generic/finegrained_thp.h>
29 #include <asm-generic/huge_mm.h>
39 SCAN_EXCEED_SHARED_PTE,
43 SCAN_LACK_REFERENCED_PAGE,
57 SCAN_ALLOC_HUGE_PAGE_FAIL,
58 SCAN_CGROUP_CHARGE_FAIL,
60 SCAN_PAGE_HAS_PRIVATE,
63 #define CREATE_TRACE_POINTS
64 #include <trace/events/huge_memory.h>
66 static struct task_struct *khugepaged_thread __read_mostly;
67 static DEFINE_MUTEX(khugepaged_mutex);
69 /* default: scan 8*512 ptes (or vmas) every 10 seconds */
70 static unsigned int khugepaged_pages_to_scan __read_mostly;
71 static unsigned int khugepaged_pages_collapsed;
72 static unsigned int khugepaged_full_scans;
73 static unsigned int khugepaged_scan_sleep_millisecs __read_mostly = 10000;
74 /* during fragmentation poll the hugepage allocator once every minute */
75 static unsigned int khugepaged_alloc_sleep_millisecs __read_mostly = 60000;
76 static unsigned long khugepaged_sleep_expire;
77 static DEFINE_SPINLOCK(khugepaged_mm_lock);
78 static DECLARE_WAIT_QUEUE_HEAD(khugepaged_wait);
80 * default collapse hugepages if there is at least one pte mapped like
81 * it would have happened if the vma was large enough during page fault.
84 static unsigned int khugepaged_max_ptes_none __read_mostly;
85 static unsigned int khugepaged_max_ptes_swap __read_mostly;
86 static unsigned int khugepaged_max_ptes_shared __read_mostly;
88 #ifdef CONFIG_FINEGRAINED_THP
91 * It is used to provide hints to khugepaged about
92 * which address space has changed recently.
94 struct thp_scan_hint {
96 struct vm_area_struct *vma;
97 unsigned long diff; /* memory difference */
98 unsigned long jiffies; /* time stamp for profiling purpose */
99 struct list_head hint_list;
102 /* THP type descriptor */
104 THP_TYPE_FAIL, /* cannot make hugepage */
105 THP_TYPE_64KB, /* 64KB hugepage can be made, use CONT_PTE */
106 THP_TYPE_2MB, /* 2MB hugepage can be made, use PMD */
109 static unsigned int khugepaged_max_ptes_none_64kb __read_mostly;
110 static unsigned int khugepaged_max_ptes_swap_64kb __read_mostly;
111 static unsigned int khugepaged_max_ptes_shared_64kb __read_mostly;
112 #endif /* CONFIG_FINEGRAINED_THP */
114 #define MM_SLOTS_HASH_BITS 10
115 static __read_mostly DEFINE_HASHTABLE(mm_slots_hash, MM_SLOTS_HASH_BITS);
117 static struct kmem_cache *mm_slot_cache __read_mostly;
119 #define MAX_PTE_MAPPED_THP 8
122 * struct mm_slot - hash lookup from mm to mm_slot
123 * @hash: hash collision list
124 * @mm_node: khugepaged scan list headed in khugepaged_scan.mm_head
125 * @mm: the mm that this information is valid for
128 struct hlist_node hash;
129 struct list_head mm_node;
130 struct mm_struct *mm;
132 /* pte-mapped THP in this mm */
133 int nr_pte_mapped_thp;
134 unsigned long pte_mapped_thp[MAX_PTE_MAPPED_THP];
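/*
 * Addresses of pte-mapped THPs found while collapsing file pages
 * (see retract_page_tables()) are queued here, up to
 * MAX_PTE_MAPPED_THP entries, and collapsed later by
 * khugepaged_collapse_pte_mapped_thps().
 */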
138 * struct khugepaged_scan - cursor for scanning
139 * @mm_head: the head of the mm list to scan
140 * @mm_slot: the current mm_slot we are scanning
141 * @address: the next address inside that to be scanned
143 * There is only one khugepaged_scan instance of this cursor structure.
145 struct khugepaged_scan {
146 struct list_head mm_head;
147 struct mm_slot *mm_slot;
148 unsigned long address;
149 #ifdef CONFIG_FINEGRAINED_THP
152 struct list_head hint_list;
153 #endif /* CONFIG_FINEGRAINED_THP */
156 static struct khugepaged_scan khugepaged_scan = {
157 .mm_head = LIST_HEAD_INIT(khugepaged_scan.mm_head),
158 #ifdef CONFIG_FINEGRAINED_THP
159 .hint_list = LIST_HEAD_INIT(khugepaged_scan.hint_list),
164 static ssize_t scan_sleep_millisecs_show(struct kobject *kobj,
165 struct kobj_attribute *attr,
168 return sprintf(buf, "%u\n", khugepaged_scan_sleep_millisecs);
171 static ssize_t scan_sleep_millisecs_store(struct kobject *kobj,
172 struct kobj_attribute *attr,
173 const char *buf, size_t count)
178 err = kstrtoul(buf, 10, &msecs);
179 if (err || msecs > UINT_MAX)
182 khugepaged_scan_sleep_millisecs = msecs;
183 khugepaged_sleep_expire = 0;
184 wake_up_interruptible(&khugepaged_wait);
188 static struct kobj_attribute scan_sleep_millisecs_attr =
189 __ATTR(scan_sleep_millisecs, 0644, scan_sleep_millisecs_show,
190 scan_sleep_millisecs_store);
192 static ssize_t alloc_sleep_millisecs_show(struct kobject *kobj,
193 struct kobj_attribute *attr,
196 return sprintf(buf, "%u\n", khugepaged_alloc_sleep_millisecs);
199 static ssize_t alloc_sleep_millisecs_store(struct kobject *kobj,
200 struct kobj_attribute *attr,
201 const char *buf, size_t count)
206 err = kstrtoul(buf, 10, &msecs);
207 if (err || msecs > UINT_MAX)
210 khugepaged_alloc_sleep_millisecs = msecs;
211 khugepaged_sleep_expire = 0;
212 wake_up_interruptible(&khugepaged_wait);
216 static struct kobj_attribute alloc_sleep_millisecs_attr =
217 __ATTR(alloc_sleep_millisecs, 0644, alloc_sleep_millisecs_show,
218 alloc_sleep_millisecs_store);
220 static ssize_t pages_to_scan_show(struct kobject *kobj,
221 struct kobj_attribute *attr,
224 return sprintf(buf, "%u\n", khugepaged_pages_to_scan);
226 static ssize_t pages_to_scan_store(struct kobject *kobj,
227 struct kobj_attribute *attr,
228 const char *buf, size_t count)
233 err = kstrtoul(buf, 10, &pages);
234 if (err || !pages || pages > UINT_MAX)
237 khugepaged_pages_to_scan = pages;
241 static struct kobj_attribute pages_to_scan_attr =
242 __ATTR(pages_to_scan, 0644, pages_to_scan_show,
243 pages_to_scan_store);
245 static ssize_t pages_collapsed_show(struct kobject *kobj,
246 struct kobj_attribute *attr,
249 return sprintf(buf, "%u\n", khugepaged_pages_collapsed);
251 static struct kobj_attribute pages_collapsed_attr =
252 __ATTR_RO(pages_collapsed);
254 static ssize_t full_scans_show(struct kobject *kobj,
255 struct kobj_attribute *attr,
258 return sprintf(buf, "%u\n", khugepaged_full_scans);
260 static struct kobj_attribute full_scans_attr =
261 __ATTR_RO(full_scans);
263 static ssize_t khugepaged_defrag_show(struct kobject *kobj,
264 struct kobj_attribute *attr, char *buf)
266 return single_hugepage_flag_show(kobj, attr, buf,
267 TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
269 static ssize_t khugepaged_defrag_store(struct kobject *kobj,
270 struct kobj_attribute *attr,
271 const char *buf, size_t count)
273 return single_hugepage_flag_store(kobj, attr, buf, count,
274 TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
276 static struct kobj_attribute khugepaged_defrag_attr =
277 __ATTR(defrag, 0644, khugepaged_defrag_show,
278 khugepaged_defrag_store);
281 * max_ptes_none controls if khugepaged should collapse hugepages over
282 * any unmapped ptes in turn potentially increasing the memory
283 * footprint of the vmas. When max_ptes_none is 0 khugepaged will not
284 * reduce the available free memory in the system as it
285 * runs. Increasing max_ptes_none will instead potentially reduce the
286 * free memory in the system during the khugepaged scan.
288 static ssize_t khugepaged_max_ptes_none_show(struct kobject *kobj,
289 struct kobj_attribute *attr,
292 return sprintf(buf, "%u\n", khugepaged_max_ptes_none);
294 static ssize_t khugepaged_max_ptes_none_store(struct kobject *kobj,
295 struct kobj_attribute *attr,
296 const char *buf, size_t count)
299 unsigned long max_ptes_none;
301 err = kstrtoul(buf, 10, &max_ptes_none);
302 if (err || max_ptes_none > HPAGE_PMD_NR-1)
305 khugepaged_max_ptes_none = max_ptes_none;
309 static struct kobj_attribute khugepaged_max_ptes_none_attr =
310 __ATTR(max_ptes_none, 0644, khugepaged_max_ptes_none_show,
311 khugepaged_max_ptes_none_store);
313 static ssize_t khugepaged_max_ptes_swap_show(struct kobject *kobj,
314 struct kobj_attribute *attr,
317 return sprintf(buf, "%u\n", khugepaged_max_ptes_swap);
320 static ssize_t khugepaged_max_ptes_swap_store(struct kobject *kobj,
321 struct kobj_attribute *attr,
322 const char *buf, size_t count)
325 unsigned long max_ptes_swap;
327 err = kstrtoul(buf, 10, &max_ptes_swap);
328 if (err || max_ptes_swap > HPAGE_PMD_NR-1)
331 khugepaged_max_ptes_swap = max_ptes_swap;
336 static struct kobj_attribute khugepaged_max_ptes_swap_attr =
337 __ATTR(max_ptes_swap, 0644, khugepaged_max_ptes_swap_show,
338 khugepaged_max_ptes_swap_store);
340 static ssize_t khugepaged_max_ptes_shared_show(struct kobject *kobj,
341 struct kobj_attribute *attr,
344 return sprintf(buf, "%u\n", khugepaged_max_ptes_shared);
347 static ssize_t khugepaged_max_ptes_shared_store(struct kobject *kobj,
348 struct kobj_attribute *attr,
349 const char *buf, size_t count)
352 unsigned long max_ptes_shared;
354 err = kstrtoul(buf, 10, &max_ptes_shared);
355 if (err || max_ptes_shared > HPAGE_PMD_NR-1)
358 khugepaged_max_ptes_shared = max_ptes_shared;
363 static struct kobj_attribute khugepaged_max_ptes_shared_attr =
364 __ATTR(max_ptes_shared, 0644, khugepaged_max_ptes_shared_show,
365 khugepaged_max_ptes_shared_store);
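/*
 * In the usual mainline layout these tunables appear under
 * /sys/kernel/mm/transparent_hugepage/khugepaged/ (the attribute group
 * below is named "khugepaged"), e.g. (values assume 4K base pages):
 *
 *   echo 511 > /sys/kernel/mm/transparent_hugepage/khugepaged/max_ptes_none
 *   echo 64  > /sys/kernel/mm/transparent_hugepage/khugepaged/max_ptes_swap
 *
 * The max_ptes_* store handlers above reject values larger than
 * HPAGE_PMD_NR - 1.
 */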
367 static struct attribute *khugepaged_attr[] = {
368 &khugepaged_defrag_attr.attr,
369 &khugepaged_max_ptes_none_attr.attr,
370 &khugepaged_max_ptes_swap_attr.attr,
371 &khugepaged_max_ptes_shared_attr.attr,
372 &pages_to_scan_attr.attr,
373 &pages_collapsed_attr.attr,
374 &full_scans_attr.attr,
375 &scan_sleep_millisecs_attr.attr,
376 &alloc_sleep_millisecs_attr.attr,
380 struct attribute_group khugepaged_attr_group = {
381 .attrs = khugepaged_attr,
382 .name = "khugepaged",
384 #endif /* CONFIG_SYSFS */
386 int hugepage_madvise(struct vm_area_struct *vma,
387 unsigned long *vm_flags, int advice)
393 * qemu blindly sets MADV_HUGEPAGE on all allocations, but s390
394 * can't handle this properly after s390_enable_sie, so we simply
395 * ignore the madvise to prevent qemu from causing a SIGSEGV.
397 if (mm_has_pgste(vma->vm_mm))
400 *vm_flags &= ~VM_NOHUGEPAGE;
401 *vm_flags |= VM_HUGEPAGE;
403 * If the vma becomes good for khugepaged to scan,
404 * register it here without waiting for a page fault that
405 * may not happen any time soon.
407 if (!(*vm_flags & VM_NO_KHUGEPAGED) &&
408 khugepaged_enter_vma_merge(vma, *vm_flags))
411 case MADV_NOHUGEPAGE:
412 *vm_flags &= ~VM_HUGEPAGE;
413 *vm_flags |= VM_NOHUGEPAGE;
415 * Setting VM_NOHUGEPAGE will prevent khugepaged from scanning
416 * this vma even if we leave the mm registered in khugepaged if
417 * it got registered before VM_NOHUGEPAGE was set.
425 int __init khugepaged_init(void)
427 mm_slot_cache = kmem_cache_create("khugepaged_mm_slot",
428 sizeof(struct mm_slot),
429 __alignof__(struct mm_slot), 0, NULL);
433 khugepaged_pages_to_scan = HPAGE_PMD_NR * 8;
434 khugepaged_max_ptes_none = HPAGE_PMD_NR - 1;
435 khugepaged_max_ptes_swap = HPAGE_PMD_NR / 8;
436 khugepaged_max_ptes_shared = HPAGE_PMD_NR / 2;
438 #ifdef CONFIG_FINEGRAINED_THP
439 khugepaged_max_ptes_none_64kb = HPAGE_CONT_PTE_NR - 1;
440 khugepaged_max_ptes_swap_64kb = HPAGE_CONT_PTE_NR / 8;
441 khugepaged_max_ptes_shared_64kb = HPAGE_CONT_PTE_NR / 2;
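/*
 * Example (assuming 4K base pages, so HPAGE_PMD_NR == 512 and, for the
 * non-mainline 64KB CONT_PTE case, HPAGE_CONT_PTE_NR == 16): the defaults
 * above are max_ptes_none = 511, max_ptes_swap = 64, max_ptes_shared = 256
 * for 2MB collapses, and 15 / 2 / 8 respectively for 64KB collapses.
 */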
446 void __init khugepaged_destroy(void)
448 kmem_cache_destroy(mm_slot_cache);
451 static inline struct mm_slot *alloc_mm_slot(void)
453 if (!mm_slot_cache) /* initialization failed */
455 return kmem_cache_zalloc(mm_slot_cache, GFP_KERNEL);
458 static inline void free_mm_slot(struct mm_slot *mm_slot)
460 kmem_cache_free(mm_slot_cache, mm_slot);
463 static struct mm_slot *get_mm_slot(struct mm_struct *mm)
465 struct mm_slot *mm_slot;
467 hash_for_each_possible(mm_slots_hash, mm_slot, hash, (unsigned long)mm)
468 if (mm == mm_slot->mm)
474 static void insert_to_mm_slots_hash(struct mm_struct *mm,
475 struct mm_slot *mm_slot)
478 hash_add(mm_slots_hash, &mm_slot->hash, (long)mm);
481 static inline int khugepaged_test_exit(struct mm_struct *mm)
483 return atomic_read(&mm->mm_users) == 0;
486 #ifdef CONFIG_FINEGRAINED_THP
487 static void clear_hint_list(struct mm_slot *slot);
488 #endif /* CONFIG_FINEGRAINED_THP */
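/*
 * hugepage_vma_check() - decide whether @vma may be collapsed by khugepaged:
 * THP must be enabled for the vma, file mappings must be hugepage aligned,
 * shmem is governed by its mount options/sysfs knob, read-only file THP
 * needs CONFIG_READ_ONLY_THP_FOR_FS, and anonymous vmas need an anon_vma,
 * no vm_ops, must not be a temporary stack and must not be
 * VM_NO_KHUGEPAGED. The arch_hugepage_vma_*_check() hooks are part of the
 * (non-mainline) fine-grained THP support.
 */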
490 static bool hugepage_vma_check(struct vm_area_struct *vma,
491 unsigned long vm_flags)
493 if (!transhuge_vma_enabled(vma, vm_flags))
496 if (vma->vm_file && !IS_ALIGNED((vma->vm_start >> PAGE_SHIFT) -
497 vma->vm_pgoff, HPAGE_PMD_NR))
500 /* Check whether arch-dependent shmem hugepages are available */
501 if (arch_hugepage_vma_shmem_check(vma, vm_flags))
503 /* Enabled via shmem mount options or sysfs settings. */
504 if (shmem_file(vma->vm_file))
505 return shmem_huge_enabled(vma);
507 /* THP settings require madvise. */
508 if (!(vm_flags & VM_HUGEPAGE) && !khugepaged_always())
511 /* Check whether arch-dependent file hugepages are available */
512 if (arch_hugepage_vma_file_check(vma, vm_flags))
514 /* Only regular files are valid */
515 else if (IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS) && vma->vm_file &&
516 (vm_flags & VM_DENYWRITE)) {
517 struct inode *inode = vma->vm_file->f_inode;
519 return S_ISREG(inode->i_mode);
522 if (!vma->anon_vma || vma->vm_ops)
524 if (vma_is_temporary_stack(vma))
526 return !(vm_flags & VM_NO_KHUGEPAGED);
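/*
 * __khugepaged_enter() - register @mm with khugepaged: allocate an mm_slot,
 * set MMF_VM_HUGEPAGE, hash the slot and queue it behind the scanning
 * cursor, waking the daemon if the scan list was previously empty.
 */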
529 int __khugepaged_enter(struct mm_struct *mm)
531 struct mm_slot *mm_slot;
534 mm_slot = alloc_mm_slot();
538 /* __khugepaged_exit() must not run from under us */
539 VM_BUG_ON_MM(atomic_read(&mm->mm_users) == 0, mm);
540 if (unlikely(test_and_set_bit(MMF_VM_HUGEPAGE, &mm->flags))) {
541 free_mm_slot(mm_slot);
545 spin_lock(&khugepaged_mm_lock);
546 insert_to_mm_slots_hash(mm, mm_slot);
548 * Insert just behind the scanning cursor, to let the area settle down.
551 wakeup = list_empty(&khugepaged_scan.mm_head);
552 list_add_tail(&mm_slot->mm_node, &khugepaged_scan.mm_head);
553 spin_unlock(&khugepaged_mm_lock);
557 wake_up_interruptible(&khugepaged_wait);
562 int khugepaged_enter_vma_merge(struct vm_area_struct *vma,
563 unsigned long vm_flags)
565 unsigned long hstart, hend;
568 * khugepaged only supports read-only files for non-shmem files.
569 * khugepaged does not yet work on special mappings. And
570 * file-private shmem THP is not supported.
572 if (!hugepage_vma_check(vma, vm_flags))
575 hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
576 hend = vma->vm_end & HPAGE_PMD_MASK;
578 return khugepaged_enter(vma, vm_flags);
579 #ifdef CONFIG_FINEGRAINED_THP
580 hstart = (vma->vm_start + ~HPAGE_CONT_PTE_MASK) & HPAGE_CONT_PTE_MASK;
581 hend = vma->vm_end & HPAGE_CONT_PTE_MASK;
583 return khugepaged_enter(vma, vm_flags);
584 #endif /* CONFIG_FINEGRAINED_THP */
588 void __khugepaged_exit(struct mm_struct *mm)
590 struct mm_slot *mm_slot;
593 spin_lock(&khugepaged_mm_lock);
594 mm_slot = get_mm_slot(mm);
595 if (mm_slot && khugepaged_scan.mm_slot != mm_slot) {
596 #ifdef CONFIG_FINEGRAINED_THP
597 clear_hint_list(mm_slot);
599 hash_del(&mm_slot->hash);
600 list_del(&mm_slot->mm_node);
603 spin_unlock(&khugepaged_mm_lock);
606 clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
607 free_mm_slot(mm_slot);
609 } else if (mm_slot) {
611 * This is required to serialize against
612 * khugepaged_test_exit() (which is guaranteed to run
613 * under mmap sem read mode). Stop here (after we
614 * return all pagetables will be destroyed) until
615 * khugepaged has finished working on the pagetables
616 * under the mmap_lock.
619 mmap_write_unlock(mm);
623 static void release_pte_page(struct page *page)
625 mod_node_page_state(page_pgdat(page),
626 NR_ISOLATED_ANON + page_is_file_lru(page),
629 putback_lru_page(page);
632 static void release_pte_pages(pte_t *pte, pte_t *_pte,
633 struct list_head *compound_pagelist)
635 struct page *page, *tmp;
637 while (--_pte >= pte) {
638 pte_t pteval = *_pte;
640 page = pte_page(pteval);
641 if (!pte_none(pteval) && !is_zero_pfn(pte_pfn(pteval)) &&
643 release_pte_page(page);
646 list_for_each_entry_safe(page, tmp, compound_pagelist, lru) {
647 list_del(&page->lru);
648 release_pte_page(page);
652 static bool is_refcount_suitable(struct page *page)
654 int expected_refcount;
656 expected_refcount = total_mapcount(page);
657 if (PageSwapCache(page))
658 expected_refcount += compound_nr(page);
660 return page_count(page) == expected_refcount;
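/*
 * Example: a base page mapped by exactly one process and not in the swap
 * cache is expected to have page_count() == total_mapcount() == 1; any
 * extra reference (e.g. a GUP pin) makes the refcount unsuitable and the
 * collapse is skipped.
 */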
663 #ifdef CONFIG_FINEGRAINED_THP
664 static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
665 unsigned long address,
667 struct list_head *compound_pagelist,
669 #else /* CONFIG_FINEGRAINED_THP */
670 static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
671 unsigned long address,
673 struct list_head *compound_pagelist)
674 #endif /* CONFIG_FINEGRAINED_THP */
676 struct page *page = NULL;
678 int none_or_zero = 0, shared = 0, result = 0, referenced = 0;
679 bool writable = false;
680 #ifdef CONFIG_FINEGRAINED_THP
681 int max_ptes_shared, max_ptes_none;
684 if (hpage_type == THP_TYPE_64KB) {
685 hpage_nr = HPAGE_CONT_PTE_NR;
686 max_ptes_shared = khugepaged_max_ptes_shared_64kb;
687 max_ptes_none = khugepaged_max_ptes_none_64kb;
689 hpage_nr = HPAGE_PMD_NR;
690 max_ptes_shared = khugepaged_max_ptes_shared;
691 max_ptes_none = khugepaged_max_ptes_none;
693 #endif /* CONFIG_FINEGRAINED_THP */
696 #ifdef CONFIG_FINEGRAINED_THP
697 _pte < pte + hpage_nr;
699 _pte < pte+HPAGE_PMD_NR;
701 _pte++, address += PAGE_SIZE) {
702 pte_t pteval = *_pte;
703 if (pte_none(pteval) || (pte_present(pteval) &&
704 is_zero_pfn(pte_pfn(pteval)))) {
705 #ifdef CONFIG_FINEGRAINED_THP
706 if (!userfaultfd_armed(vma) &&
707 ++none_or_zero <= max_ptes_none)
708 #else /* CONFIG_FINEGRAINED_THP */
709 if (!userfaultfd_armed(vma) &&
710 ++none_or_zero <= khugepaged_max_ptes_none)
711 #endif /* CONFIG_FINEGRAINED_THP */
715 result = SCAN_EXCEED_NONE_PTE;
719 if (!pte_present(pteval)) {
720 result = SCAN_PTE_NON_PRESENT;
723 page = vm_normal_page(vma, address, pteval);
724 if (unlikely(!page)) {
725 result = SCAN_PAGE_NULL;
729 VM_BUG_ON_PAGE(!PageAnon(page), page);
731 #ifdef CONFIG_FINEGRAINED_THP
732 if (page_mapcount(page) > 1 &&
733 ++shared > max_ptes_shared)
734 #else /* CONFIG_FINEGRAINED_THP */
735 if (page_mapcount(page) > 1 &&
736 ++shared > khugepaged_max_ptes_shared)
737 #endif /* CONFIG_FINEGRAINED_THP */
739 result = SCAN_EXCEED_SHARED_PTE;
743 if (PageCompound(page)) {
745 page = compound_head(page);
748 * Check if we have dealt with the compound page
751 list_for_each_entry(p, compound_pagelist, lru) {
758 * We can do it before isolate_lru_page because the
759 * page can't be freed from under us. NOTE: PG_lock
760 * is needed to serialize against split_huge_page
761 * when invoked from the VM.
763 if (!trylock_page(page)) {
764 result = SCAN_PAGE_LOCK;
769 * Check if the page has any GUP (or other external) pins.
771 * The page table that maps the page has already been unlinked
772 * from the page table tree and this process cannot get
773 * an additional pin on the page.
775 * New pins can come later if the page is shared across fork,
776 * but not from this process. The other process cannot write to
777 * the page, only trigger CoW.
779 if (!is_refcount_suitable(page)) {
781 result = SCAN_PAGE_COUNT;
784 if (!pte_write(pteval) && PageSwapCache(page) &&
785 !reuse_swap_page(page, NULL)) {
787 * Page is in the swap cache and cannot be re-used.
788 * It cannot be collapsed into a THP.
791 result = SCAN_SWAP_CACHE_PAGE;
796 * Isolate the page to avoid collapsing an hugepage
797 * currently in use by the VM.
799 if (isolate_lru_page(page)) {
801 result = SCAN_DEL_PAGE_LRU;
804 mod_node_page_state(page_pgdat(page),
805 NR_ISOLATED_ANON + page_is_file_lru(page),
807 VM_BUG_ON_PAGE(!PageLocked(page), page);
808 VM_BUG_ON_PAGE(PageLRU(page), page);
810 if (PageCompound(page))
811 list_add_tail(&page->lru, compound_pagelist);
813 /* There should be enough young pte to collapse the page */
814 if (pte_young(pteval) ||
815 page_is_young(page) || PageReferenced(page) ||
816 mmu_notifier_test_young(vma->vm_mm, address))
819 if (pte_write(pteval))
823 if (unlikely(!writable)) {
824 result = SCAN_PAGE_RO;
825 } else if (unlikely(!referenced)) {
826 result = SCAN_LACK_REFERENCED_PAGE;
828 result = SCAN_SUCCEED;
829 trace_mm_collapse_huge_page_isolate(page, none_or_zero,
830 referenced, writable, result);
834 release_pte_pages(pte, _pte, compound_pagelist);
835 trace_mm_collapse_huge_page_isolate(page, none_or_zero,
836 referenced, writable, result);
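/*
 * __collapse_huge_page_copy() - copy the isolated small pages into the new
 * hugepage @page, clear the source ptes, drop the rmap on the old pages and
 * free them; pte_none/zero-pfn slots are simply filled with cleared pages.
 */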
840 #ifdef CONFIG_FINEGRAINED_THP
841 static void __collapse_huge_page_copy(pte_t *pte, struct page *page,
842 struct vm_area_struct *vma,
843 unsigned long address,
845 struct list_head *compound_pagelist,
847 #else /* CONFIG_FINEGRAINED_THP */
848 static void __collapse_huge_page_copy(pte_t *pte, struct page *page,
849 struct vm_area_struct *vma,
850 unsigned long address,
852 struct list_head *compound_pagelist)
853 #endif /* CONFIG_FINEGRAINED_THP */
855 struct page *src_page, *tmp;
857 #ifdef CONFIG_FINEGRAINED_THP
858 int hpage_nr = (hpage_type == THP_TYPE_64KB ?
859 HPAGE_CONT_PTE_NR : HPAGE_PMD_NR);
863 #ifdef CONFIG_FINEGRAINED_THP
864 _pte < pte + hpage_nr;
866 _pte < pte + HPAGE_PMD_NR;
868 _pte++, page++, address += PAGE_SIZE) {
869 pte_t pteval = *_pte;
871 if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) {
872 clear_user_highpage(page, address);
873 add_mm_counter(vma->vm_mm, MM_ANONPAGES, 1);
874 if (is_zero_pfn(pte_pfn(pteval))) {
876 * ptl mostly unnecessary.
880 * paravirt calls inside pte_clear here are superfluous.
883 pte_clear(vma->vm_mm, address, _pte);
887 src_page = pte_page(pteval);
888 copy_user_highpage(page, src_page, address, vma);
889 if (!PageCompound(src_page))
890 release_pte_page(src_page);
892 * ptl mostly unnecessary, but preempt has to
893 * be disabled to update the per-cpu stats
894 * inside page_remove_rmap().
898 * paravirt calls inside pte_clear here are superfluous.
901 pte_clear(vma->vm_mm, address, _pte);
902 page_remove_rmap(src_page, false);
904 free_page_and_swap_cache(src_page);
908 list_for_each_entry_safe(src_page, tmp, compound_pagelist, lru) {
909 list_del(&src_page->lru);
910 release_pte_page(src_page);
914 static void khugepaged_alloc_sleep(void)
918 add_wait_queue(&khugepaged_wait, &wait);
919 freezable_schedule_timeout_interruptible(
920 msecs_to_jiffies(khugepaged_alloc_sleep_millisecs));
921 remove_wait_queue(&khugepaged_wait, &wait);
924 static int khugepaged_node_load[MAX_NUMNODES];
926 static bool khugepaged_scan_abort(int nid)
931 * If node_reclaim_mode is disabled, then no extra effort is made to
932 * allocate memory locally.
934 if (!node_reclaim_mode)
937 /* If there is a count for this node already, it must be acceptable */
938 if (khugepaged_node_load[nid])
941 for (i = 0; i < MAX_NUMNODES; i++) {
942 if (!khugepaged_node_load[i])
944 if (node_distance(nid, i) > node_reclaim_distance)
950 /* Defrag for khugepaged will enter direct reclaim/compaction if necessary */
951 static inline gfp_t alloc_hugepage_khugepaged_gfpmask(void)
953 return khugepaged_defrag() ? GFP_TRANSHUGE : GFP_TRANSHUGE_LIGHT;
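/*
 * khugepaged_find_target_node() - pick the node to allocate the hugepage
 * from: the node with the highest khugepaged_node_load[] count seen during
 * the scan, with a simple round-robin tie-break so one node is not always
 * preferred when several nodes have the same hit count.
 */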
957 static int khugepaged_find_target_node(void)
959 static int last_khugepaged_target_node = NUMA_NO_NODE;
960 int nid, target_node = 0, max_value = 0;
962 /* find first node with max normal pages hit */
963 for (nid = 0; nid < MAX_NUMNODES; nid++)
964 if (khugepaged_node_load[nid] > max_value) {
965 max_value = khugepaged_node_load[nid];
969 /* do some balance if several nodes have the same hit record */
970 if (target_node <= last_khugepaged_target_node)
971 for (nid = last_khugepaged_target_node + 1; nid < MAX_NUMNODES;
973 if (max_value == khugepaged_node_load[nid]) {
978 last_khugepaged_target_node = target_node;
982 static bool khugepaged_prealloc_page(struct page **hpage, bool *wait)
984 if (IS_ERR(*hpage)) {
990 khugepaged_alloc_sleep();
1000 khugepaged_alloc_page(struct page **hpage, gfp_t gfp, int node)
1002 VM_BUG_ON_PAGE(*hpage, *hpage);
1004 *hpage = __alloc_pages_node(node, gfp, HPAGE_PMD_ORDER);
1005 if (unlikely(!*hpage)) {
1006 count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
1007 *hpage = ERR_PTR(-ENOMEM);
1011 prep_transhuge_page(*hpage);
1012 count_vm_event(THP_COLLAPSE_ALLOC);
1016 static int khugepaged_find_target_node(void)
1021 #ifdef CONFIG_FINEGRAINED_THP
1022 static inline struct page *alloc_khugepaged_hugepage(int hpage_order)
1024 static inline struct page *alloc_khugepaged_hugepage(void)
1029 #ifdef CONFIG_FINEGRAINED_THP
1030 page = alloc_pages(alloc_hugepage_khugepaged_gfpmask(),
1033 page = alloc_pages(alloc_hugepage_khugepaged_gfpmask(),
1037 prep_transhuge_page(page);
1041 static struct page *khugepaged_alloc_hugepage(bool *wait)
1046 #ifdef CONFIG_FINEGRAINED_THP
1047 hpage = alloc_khugepaged_hugepage(HPAGE_PMD_ORDER);
1049 hpage = alloc_khugepaged_hugepage();
1052 count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
1057 khugepaged_alloc_sleep();
1059 count_vm_event(THP_COLLAPSE_ALLOC);
1060 } while (unlikely(!hpage) && likely(khugepaged_enabled()));
1065 static bool khugepaged_prealloc_page(struct page **hpage, bool *wait)
1068 * If the hpage allocated earlier was briefly exposed in page cache
1069 * before collapse_file() failed, it is possible that racing lookups
1070 * have not yet completed, and would then be unpleasantly surprised by
1071 * finding the hpage reused for the same mapping at a different offset.
1072 * Just release the previous allocation if there is any danger of that.
1074 if (*hpage && page_count(*hpage) > 1) {
1080 *hpage = khugepaged_alloc_hugepage(wait);
1082 if (unlikely(!*hpage))
1088 #ifdef CONFIG_FINEGRAINED_THP
1089 static struct page *
1090 khugepaged_alloc_page(struct page **hpage, gfp_t gfp, int node, int hpage_type)
1094 if (hpage_type == THP_TYPE_64KB)
1095 page = alloc_khugepaged_hugepage(HPAGE_CONT_PTE_ORDER);
1102 #else /* CONFIG_FINEGRAINED_THP */
1103 static struct page *
1104 khugepaged_alloc_page(struct page **hpage, gfp_t gfp, int node)
1110 #endif /* CONFIG_FINEGRAINED_THP */
1114 * If the mmap_lock was temporarily dropped, revalidate the vma
1115 * before taking the mmap_lock again.
1116 * Return 0 on success, otherwise return a non-zero
1117 * value (scan code).
1120 #ifdef CONFIG_FINEGRAINED_THP
1121 static int hugepage_vma_revalidate(struct mm_struct *mm, unsigned long address,
1122 struct vm_area_struct **vmap, int hpage_type)
1124 static int hugepage_vma_revalidate(struct mm_struct *mm, unsigned long address,
1125 struct vm_area_struct **vmap)
1128 struct vm_area_struct *vma;
1129 unsigned long hstart, hend;
1131 if (unlikely(khugepaged_test_exit(mm)))
1132 return SCAN_ANY_PROCESS;
1134 *vmap = vma = find_vma(mm, address);
1136 return SCAN_VMA_NULL;
1138 #ifdef CONFIG_FINEGRAINED_THP
1139 if (hpage_type == THP_TYPE_64KB) {
1140 hstart = (vma->vm_start + ~HPAGE_CONT_PTE_MASK) & HPAGE_CONT_PTE_MASK;
1141 hend = vma->vm_end & HPAGE_CONT_PTE_MASK;
1142 if (address < hstart || address + HPAGE_CONT_PTE_SIZE > hend)
1143 return SCAN_ADDRESS_RANGE;
1144 if (!hugepage_vma_check(vma, vma->vm_flags))
1145 return SCAN_VMA_CHECK;
1148 #endif /* CONFIG_FINEGRAINED_THP */
1149 hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
1150 hend = vma->vm_end & HPAGE_PMD_MASK;
1151 if (address < hstart || address + HPAGE_PMD_SIZE > hend)
1152 return SCAN_ADDRESS_RANGE;
1153 if (!hugepage_vma_check(vma, vma->vm_flags))
1154 return SCAN_VMA_CHECK;
1155 /* Anon VMA expected */
1156 if (!vma->anon_vma || vma->vm_ops)
1157 return SCAN_VMA_CHECK;
1162 * Bring missing pages in from swap, to complete THP collapse.
1163 * Only done if khugepaged_scan_pmd believes it is worthwhile.
1165 * Called and returns without pte mapped or spinlocks held,
1166 * but with mmap_lock held to protect against vma changes.
1169 #ifdef CONFIG_FINEGRAINED_THP
1170 static bool __collapse_huge_page_swapin(struct mm_struct *mm,
1171 struct vm_area_struct *vma,
1172 unsigned long address, pmd_t *pmd,
1173 int referenced, int hpage_type)
1174 #else /* CONFIG_FINEGRAINED_THP */
1175 static bool __collapse_huge_page_swapin(struct mm_struct *mm,
1176 struct vm_area_struct *vma,
1177 unsigned long address, pmd_t *pmd,
1179 #endif /* CONFIG_FINEGRAINED_THP */
1183 struct vm_fault vmf = {
1186 .flags = FAULT_FLAG_ALLOW_RETRY,
1188 .pgoff = linear_page_index(vma, address),
1190 #ifdef CONFIG_FINEGRAINED_THP
1191 int hpage_size = (hpage_type == THP_TYPE_64KB) ?
1192 HPAGE_CONT_PTE_SIZE : HPAGE_PMD_SIZE;
1195 vmf.pte = pte_offset_map(pmd, address);
1197 #ifdef CONFIG_FINEGRAINED_THP
1198 vmf.address < address + hpage_size;
1200 vmf.address < address + HPAGE_PMD_NR*PAGE_SIZE;
1202 vmf.pte++, vmf.address += PAGE_SIZE) {
1203 vmf.orig_pte = *vmf.pte;
1204 if (!is_swap_pte(vmf.orig_pte))
1207 ret = do_swap_page(&vmf);
1209 /* do_swap_page returns VM_FAULT_RETRY with released mmap_lock */
1210 if (ret & VM_FAULT_RETRY) {
1212 #ifdef CONFIG_FINEGRAINED_THP
1213 if (hugepage_vma_revalidate(mm, address, &vmf.vma, hpage_type))
1215 if (hugepage_vma_revalidate(mm, address, &vmf.vma))
1218 /* vma is no longer available, don't continue to swapin */
1219 trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
1222 /* check if the pmd is still valid */
1223 if (mm_find_pmd(mm, address) != pmd) {
1224 trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
1228 if (ret & VM_FAULT_ERROR) {
1229 trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
1232 /* pte is unmapped now, we need to map it */
1233 vmf.pte = pte_offset_map(pmd, vmf.address);
1238 /* Drain LRU add pagevec to remove extra pin on the swapped in pages */
1242 trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 1);
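/*
 * collapse_huge_page() is entered with mmap_lock held for read and always
 * returns with it released: allocate the new hugepage without mmap_lock,
 * revalidate the vma, swap in missing pages if needed, then take
 * mmap_write_lock, isolate and copy the small pages and install the huge
 * mapping (a PMD, or contiguous PTEs for the 64KB case under
 * CONFIG_FINEGRAINED_THP).
 */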
1246 #ifdef CONFIG_FINEGRAINED_THP
1247 static void collapse_huge_page(struct mm_struct *mm,
1248 unsigned long address,
1249 struct page **hpage,
1250 int node, int referenced, int unmapped,
1252 #else /* CONFIG_FINEGRAINED_THP */
1253 static void collapse_huge_page(struct mm_struct *mm,
1254 unsigned long address,
1255 struct page **hpage,
1256 int node, int referenced, int unmapped)
1257 #endif /* CONFIG_FINEGRAINED_THP */
1259 LIST_HEAD(compound_pagelist);
1263 struct page *new_page;
1264 spinlock_t *pmd_ptl, *pte_ptl;
1265 int isolated = 0, result = 0;
1266 struct vm_area_struct *vma;
1267 struct mmu_notifier_range range;
1270 #ifdef CONFIG_FINEGRAINED_THP
1273 VM_BUG_ON(address & (hpage_type == THP_TYPE_64KB ?
1274 ~HPAGE_CONT_PTE_MASK : ~HPAGE_PMD_MASK));
1276 VM_BUG_ON(address & ~HPAGE_PMD_MASK);
1279 /* Only allocate from the target node */
1280 gfp = alloc_hugepage_khugepaged_gfpmask() | __GFP_THISNODE;
1283 * Before allocating the hugepage, release the mmap_lock read lock.
1284 * The allocation can take potentially a long time if it involves
1285 * sync compaction, and we do not need to hold the mmap_lock during
1286 * that. We will recheck the vma after taking it again in write mode.
1288 mmap_read_unlock(mm);
1289 #ifdef CONFIG_FINEGRAINED_THP
1290 new_page = khugepaged_alloc_page(hpage, gfp, node, hpage_type);
1292 new_page = khugepaged_alloc_page(hpage, gfp, node);
1295 result = SCAN_ALLOC_HUGE_PAGE_FAIL;
1299 if (unlikely(mem_cgroup_charge(new_page, mm, gfp))) {
1300 result = SCAN_CGROUP_CHARGE_FAIL;
1303 count_memcg_page_event(new_page, THP_COLLAPSE_ALLOC);
1306 #ifdef CONFIG_FINEGRAINED_THP
1307 result = hugepage_vma_revalidate(mm, address, &vma, hpage_type);
1309 result = hugepage_vma_revalidate(mm, address, &vma);
1312 mmap_read_unlock(mm);
1316 pmd = mm_find_pmd(mm, address);
1318 result = SCAN_PMD_NULL;
1319 mmap_read_unlock(mm);
1324 * __collapse_huge_page_swapin always returns with mmap_lock locked.
1325 * If it fails, we release mmap_lock and jump out_nolock.
1326 * Continuing to collapse causes inconsistency.
1328 #ifdef CONFIG_FINEGRAINED_THP
1329 if (unmapped && !__collapse_huge_page_swapin(mm, vma, address,
1330 pmd, referenced, hpage_type)) {
1331 mmap_read_unlock(mm);
1334 #else /* CONFIG_FINEGRAINED_THP */
1335 if (unmapped && !__collapse_huge_page_swapin(mm, vma, address,
1337 mmap_read_unlock(mm);
1340 #endif /* CONFIG_FINEGRAINED_THP*/
1342 mmap_read_unlock(mm);
1344 * Prevent all access to pagetables with the exception of
1345 * gup_fast later handled by the ptep_clear_flush and the VM
1346 * handled by the anon_vma lock + PG_lock.
1348 mmap_write_lock(mm);
1349 #ifdef CONFIG_FINEGRAINED_THP
1350 result = hugepage_vma_revalidate(mm, address, &vma, hpage_type);
1352 result = hugepage_vma_revalidate(mm, address, &vma);
1356 /* check if the pmd is still valid */
1357 if (mm_find_pmd(mm, address) != pmd)
1360 anon_vma_lock_write(vma->anon_vma);
1362 #ifdef CONFIG_FINEGRAINED_THP
1363 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, NULL, mm,
1364 address, address + (hpage_type == THP_TYPE_64KB ?
1365 HPAGE_CONT_PTE_SIZE : HPAGE_PMD_SIZE));
1367 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, NULL, mm,
1368 address, address + HPAGE_PMD_SIZE);
1370 mmu_notifier_invalidate_range_start(&range);
1372 pte = pte_offset_map(pmd, address);
1373 pte_ptl = pte_lockptr(mm, pmd);
1375 pmd_ptl = pmd_lock(mm, pmd); /* probably unnecessary */
1377 * After this gup_fast can't run anymore. This also removes
1378 * any huge TLB entry from the CPU so we won't allow
1379 * huge and small TLB entries for the same virtual address
1380 * to avoid the risk of CPU bugs in that area.
1382 #ifdef CONFIG_FINEGRAINED_THP
1383 if (hpage_type == THP_TYPE_64KB)
1384 /* FIXME: clearing ptes here causes
1385 * __collapse_huge_page_isolate and __collapse_huge_page_copy
1386 * to fail, __collapse_huge_page_copy also clears ptes
1388 flush_tlb_range(vma, address, address + HPAGE_CONT_PTE_SIZE);
1390 #endif /* CONFIG_FINEGRAINED_THP */
1391 _pmd = pmdp_collapse_flush(vma, address, pmd);
1392 spin_unlock(pmd_ptl);
1393 mmu_notifier_invalidate_range_end(&range);
1396 #ifdef CONFIG_FINEGRAINED_THP
1397 isolated = __collapse_huge_page_isolate(vma, address, pte,
1398 &compound_pagelist, hpage_type);
1399 #else /* CONFIG_FINEGRAINED_THP */
1400 isolated = __collapse_huge_page_isolate(vma, address, pte,
1401 &compound_pagelist);
1402 #endif /* CONFIG_FINEGRAINED_THP */
1403 spin_unlock(pte_ptl);
1405 if (unlikely(!isolated)) {
1406 #ifdef CONFIG_FINEGRAINED_THP
1407 if (hpage_type == THP_TYPE_64KB) {
1409 anon_vma_unlock_write(vma->anon_vma);
1413 #endif /* CONFIG_FINEGRAINED_THP */
1416 BUG_ON(!pmd_none(*pmd));
1418 * We can only use set_pmd_at when establishing
1419 * hugepmds and never for establishing regular pmds that
1420 * point to regular pagetables. Use pmd_populate for that.
1422 pmd_populate(mm, pmd, pmd_pgtable(_pmd));
1423 spin_unlock(pmd_ptl);
1424 anon_vma_unlock_write(vma->anon_vma);
1430 * All pages are isolated and locked so anon_vma rmap
1431 * can't run anymore.
1433 anon_vma_unlock_write(vma->anon_vma);
1435 #ifdef CONFIG_FINEGRAINED_THP
1436 __collapse_huge_page_copy(pte, new_page, vma, address, pte_ptl,
1437 &compound_pagelist, hpage_type);
1438 #else /* CONFIG_FINEGRAINED_THP */
1439 __collapse_huge_page_copy(pte, new_page, vma, address, pte_ptl,
1440 &compound_pagelist);
1441 #endif /* CONFIG_FINEGRAINED_THP */
1443 __SetPageUptodate(new_page);
1445 #ifdef CONFIG_FINEGRAINED_THP
1446 if (hpage_type == THP_TYPE_64KB) {
1448 _pte = arch_make_huge_pte(new_page, vma);
1449 _pte = maybe_mkwrite(pte_mkdirty(_pte), vma);
1452 pgtable = pmd_pgtable(_pmd);
1454 _pmd = mk_huge_pmd(new_page, vma->vm_page_prot);
1455 _pmd = maybe_pmd_mkwrite(pmd_mkdirty(_pmd), vma);
1457 #else /* CONFIG_FINEGRAINED_THP */
1458 pgtable = pmd_pgtable(_pmd);
1460 _pmd = mk_huge_pmd(new_page, vma->vm_page_prot);
1461 _pmd = maybe_pmd_mkwrite(pmd_mkdirty(_pmd), vma);
1462 #endif /* CONFIG_FINEGRAINED_THP */
1464 * spin_lock() below is not the equivalent of smp_wmb(), so
1465 * this is needed to prevent the copy_huge_page writes from becoming
1466 * visible after the set_pmd_at() write.
1471 #ifdef CONFIG_FINEGRAINED_THP
1472 if (hpage_type == THP_TYPE_2MB)
1474 BUG_ON(!pmd_none(*pmd));
1475 page_add_new_anon_rmap(new_page, vma, address, true);
1476 lru_cache_add_inactive_or_unevictable(new_page, vma);
1478 #ifdef CONFIG_FINEGRAINED_THP
1479 if (hpage_type == THP_TYPE_64KB)
1480 arch_set_huge_pte_at(mm, address, pte, _pte, 0);
1482 pgtable_trans_huge_deposit(mm, pmd, pgtable);
1483 set_pmd_at(mm, address, pmd, _pmd);
1485 update_mmu_cache_pmd(vma, address, pmd);
1486 #else /* CONFIG_FINEGRAINED_THP */
1487 pgtable_trans_huge_deposit(mm, pmd, pgtable);
1488 set_pmd_at(mm, address, pmd, _pmd);
1489 update_mmu_cache_pmd(vma, address, pmd);
1490 #endif /* CONFIG_FINEGRAINED_THP */
1491 spin_unlock(pmd_ptl);
1493 #ifdef CONFIG_FINEGRAINED_THP
1494 if (hpage_type == THP_TYPE_2MB)
1498 khugepaged_pages_collapsed++;
1499 result = SCAN_SUCCEED;
1501 mmap_write_unlock(mm);
1503 if (!IS_ERR_OR_NULL(*hpage))
1504 mem_cgroup_uncharge(*hpage);
1505 #ifdef CONFIG_FINEGRAINED_THP
1506 if (result != SCAN_SUCCEED && new_page && hpage_type == THP_TYPE_64KB)
1509 trace_mm_collapse_huge_page(mm, isolated, result);
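/*
 * khugepaged_scan_pmd() - scan one aligned range (PMD sized, or 64KB under
 * CONFIG_FINEGRAINED_THP): give up when the max_ptes_{none,swap,shared}
 * limits are exceeded or an unsuitable page is found, record per-node page
 * counts, and call collapse_huge_page() when the range looks collapsible.
 * Returns nonzero when a collapse was attempted, in which case the
 * mmap_lock has been released.
 */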
1515 #ifdef CONFIG_FINEGRAINED_THP
1516 static int khugepaged_scan_pmd(struct mm_struct *mm,
1517 struct vm_area_struct *vma,
1518 unsigned long address,
1519 struct page **hpage, int hpage_type)
1520 #else /* CONFIG_FINEGRAINED_THP */
1521 static int khugepaged_scan_pmd(struct mm_struct *mm,
1522 struct vm_area_struct *vma,
1523 unsigned long address,
1524 struct page **hpage)
1525 #endif /* CONFIG_FINEGRAINED_THP */
1529 int ret = 0, result = 0, referenced = 0;
1530 int none_or_zero = 0, shared = 0;
1531 struct page *page = NULL;
1532 unsigned long _address;
1534 int node = NUMA_NO_NODE, unmapped = 0;
1535 bool writable = false;
1537 #ifdef CONFIG_FINEGRAINED_THP
1539 int max_ptes_swap, max_ptes_none, max_ptes_shared;
1541 if (hpage_type == THP_TYPE_64KB) {
1542 VM_BUG_ON(address & ~HPAGE_CONT_PTE_MASK);
1543 hpage_nr = HPAGE_CONT_PTE_NR;
1544 max_ptes_swap = khugepaged_max_ptes_swap_64kb;
1545 max_ptes_none = khugepaged_max_ptes_none_64kb;
1546 max_ptes_shared = khugepaged_max_ptes_shared_64kb;
1548 VM_BUG_ON(address & ~HPAGE_PMD_MASK);
1549 hpage_nr = HPAGE_PMD_NR;
1550 max_ptes_swap = khugepaged_max_ptes_swap;
1551 max_ptes_none = khugepaged_max_ptes_none;
1552 max_ptes_shared = khugepaged_max_ptes_shared;
1554 #else /* CONFIG_FINEGRAINED_THP */
1555 VM_BUG_ON(address & ~HPAGE_PMD_MASK);
1556 #endif /* CONFIG_FINEGRAINED_THP */
1558 pmd = mm_find_pmd(mm, address);
1560 result = SCAN_PMD_NULL;
1564 memset(khugepaged_node_load, 0, sizeof(khugepaged_node_load));
1565 pte = pte_offset_map_lock(mm, pmd, address, &ptl);
1566 for (_address = address, _pte = pte;
1567 #ifdef CONFIG_FINEGRAINED_THP
1568 _pte < pte + hpage_nr;
1570 _pte < pte+HPAGE_PMD_NR;
1572 _pte++, _address += PAGE_SIZE) {
1573 pte_t pteval = *_pte;
1574 if (is_swap_pte(pteval)) {
1575 #ifdef CONFIG_FINEGRAINED_THP
1576 if (++unmapped <= max_ptes_swap)
1578 if (++unmapped <= khugepaged_max_ptes_swap)
1582 * Always be strict with uffd-wp
1583 * enabled swap entries. Please see
1584 * comment below for pte_uffd_wp().
1586 if (pte_swp_uffd_wp(pteval)) {
1587 result = SCAN_PTE_UFFD_WP;
1592 result = SCAN_EXCEED_SWAP_PTE;
1596 if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) {
1597 if (!userfaultfd_armed(vma) &&
1598 #ifdef CONFIG_FINEGRAINED_THP
1599 ++none_or_zero <= max_ptes_none
1601 ++none_or_zero <= khugepaged_max_ptes_none
1607 result = SCAN_EXCEED_NONE_PTE;
1611 if (!pte_present(pteval)) {
1612 result = SCAN_PTE_NON_PRESENT;
1615 if (pte_uffd_wp(pteval)) {
1617 * Don't collapse the page if any of the small
1618 * PTEs are armed with uffd write protection.
1619 * Here we can also mark the new huge pmd as
1620 * write protected if any of the small ones is
1621 * marked, but that could bring unknown
1622 * userfault messages that fall outside of
1623 * the registered range. So, just be simple.
1625 result = SCAN_PTE_UFFD_WP;
1628 if (pte_write(pteval))
1631 page = vm_normal_page(vma, _address, pteval);
1632 if (unlikely(!page)) {
1633 result = SCAN_PAGE_NULL;
1637 #ifdef CONFIG_FINEGRAINED_THP
1638 if (PageCompound(page) && PageTransHuge(compound_head(page))) {
1639 result = SCAN_PAGE_COMPOUND;
1643 if (page_mapcount(page) > 1 &&
1644 ++shared > max_ptes_shared)
1646 if (page_mapcount(page) > 1 &&
1647 ++shared > khugepaged_max_ptes_shared)
1650 result = SCAN_EXCEED_SHARED_PTE;
1654 page = compound_head(page);
1657 * Record which node the original page is from and save this
1658 * information to khugepaged_node_load[].
1659 * Khugepaged will allocate the hugepage from the node that has the max hit record.
1662 node = page_to_nid(page);
1663 if (khugepaged_scan_abort(node)) {
1664 result = SCAN_SCAN_ABORT;
1667 khugepaged_node_load[node]++;
1668 if (!PageLRU(page)) {
1669 result = SCAN_PAGE_LRU;
1672 if (PageLocked(page)) {
1673 result = SCAN_PAGE_LOCK;
1676 if (!PageAnon(page)) {
1677 result = SCAN_PAGE_ANON;
1682 * Check if the page has any GUP (or other external) pins.
1684 * Here the check is racy: it may see total_mapcount > refcount in some cases.
1686 * For example, one process with one forked child process.
1687 * The parent has the PMD split due to MADV_DONTNEED, then
1688 * the child tries to unmap the whole PMD, but khugepaged
1689 * may be scanning the parent between the child clearing the
1690 * PageDoubleMap flag and decrementing the mapcount. So
1691 * khugepaged may see total_mapcount > refcount.
1693 * But such a case is ephemeral; we can always retry the collapse
1694 * later. However, it may report a false positive if the page
1695 * has excessive GUP pins (i.e. 512). Anyway, the same check
1696 * will be done again later, so the risk seems low.
1698 if (!is_refcount_suitable(page)) {
1699 result = SCAN_PAGE_COUNT;
1702 if (pte_young(pteval) ||
1703 page_is_young(page) || PageReferenced(page) ||
1704 mmu_notifier_test_young(vma->vm_mm, address))
1708 result = SCAN_PAGE_RO;
1709 } else if (!referenced || (unmapped && referenced < HPAGE_PMD_NR/2)) {
1710 result = SCAN_LACK_REFERENCED_PAGE;
1712 result = SCAN_SUCCEED;
1716 pte_unmap_unlock(pte, ptl);
1718 node = khugepaged_find_target_node();
1719 /* collapse_huge_page will return with the mmap_lock released */
1720 #ifdef CONFIG_FINEGRAINED_THP
1721 collapse_huge_page(mm, address, hpage, node,
1722 referenced, unmapped, hpage_type);
1724 collapse_huge_page(mm, address, hpage, node,
1725 referenced, unmapped);
1729 trace_mm_khugepaged_scan_pmd(mm, page, writable, referenced,
1730 none_or_zero, result, unmapped);
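/*
 * collect_mm_slot() - release an mm_slot whose mm has exited: drop it from
 * the hash and the scan list and free it. Called with khugepaged_mm_lock
 * held (see the lockdep assert below).
 */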
1734 static void collect_mm_slot(struct mm_slot *mm_slot)
1736 struct mm_struct *mm = mm_slot->mm;
1738 lockdep_assert_held(&khugepaged_mm_lock);
1740 if (khugepaged_test_exit(mm)) {
1741 #ifdef CONFIG_FINEGRAINED_THP
1742 clear_hint_list(mm_slot);
1745 hash_del(&mm_slot->hash);
1746 list_del(&mm_slot->mm_node);
1749 * Not strictly needed because the mm exited already.
1751 * clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
1754 /* khugepaged_mm_lock actually not necessary for the below */
1755 free_mm_slot(mm_slot);
1762 * Notify khugepaged that the given addr of the mm is a pte-mapped THP. Then
1763 * khugepaged should try to collapse the page table.
1765 #ifdef CONFIG_FINEGRAINED_THP
1766 static int khugepaged_add_pte_mapped_thp(struct mm_struct *mm,
1767 unsigned long addr, int hpage_type)
1769 static int khugepaged_add_pte_mapped_thp(struct mm_struct *mm,
1773 struct mm_slot *mm_slot;
1775 #ifdef CONFIG_FINEGRAINED_THP
1776 VM_BUG_ON(addr & (hpage_type == THP_TYPE_64KB ?
1777 ~HPAGE_CONT_PTE_MASK :~HPAGE_PMD_MASK));
1779 VM_BUG_ON(addr & ~HPAGE_PMD_MASK);
1782 spin_lock(&khugepaged_mm_lock);
1783 mm_slot = get_mm_slot(mm);
1784 #ifdef CONFIG_FINEGRAINED_THP
1785 if (hpage_type == THP_TYPE_64KB)
1788 if (likely(mm_slot && mm_slot->nr_pte_mapped_thp < MAX_PTE_MAPPED_THP))
1789 mm_slot->pte_mapped_thp[mm_slot->nr_pte_mapped_thp++] = addr;
1790 spin_unlock(&khugepaged_mm_lock);
1795 * Try to collapse a pte-mapped THP for mm at address haddr.
1797 * This function checks whether all the PTEs in the PMD are pointing to the
1798 * right THP. If so, retract the page table so the THP can refault in as a huge page.
1801 void collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr)
1803 unsigned long haddr = addr & HPAGE_PMD_MASK;
1804 struct vm_area_struct *vma = find_vma(mm, haddr);
1806 pte_t *start_pte, *pte;
1811 #ifdef CONFIG_FINEGRAINED_THP
1812 int hpage_type = (addr & 0x01) ? THP_TYPE_64KB : THP_TYPE_2MB;
1813 int hpage_nr = (hpage_type == THP_TYPE_64KB) ?
1814 HPAGE_CONT_PTE_NR : HPAGE_PMD_NR;
1815 int hpage_size = (hpage_type == THP_TYPE_64KB) ?
1816 HPAGE_CONT_PTE_SIZE : HPAGE_PMD_SIZE;
1818 if (hpage_type == THP_TYPE_64KB)
1819 haddr = addr & HPAGE_CONT_PTE_MASK;
1822 #ifdef CONFIG_FINEGRAINED_THP
1823 if (!vma || !vma->vm_file ||
1824 vma->vm_start > haddr || vma->vm_end < haddr + hpage_size)
1826 #else /* CONFIG_FINEGRAINED_THP */
1827 if (!vma || !vma->vm_file ||
1828 vma->vm_start > haddr || vma->vm_end < haddr + HPAGE_PMD_SIZE)
1830 #endif /* CONFIG_FINEGRAINED_THP */
1833 * This vm_flags may not have VM_HUGEPAGE if the page was not
1834 * collapsed by this mm. But we can still collapse if the page is
1835 * a valid THP. Add extra VM_HUGEPAGE so hugepage_vma_check()
1836 * will not fail the vma for missing VM_HUGEPAGE.
1838 if (!hugepage_vma_check(vma, vma->vm_flags | VM_HUGEPAGE))
1841 hpage = find_lock_page(vma->vm_file->f_mapping,
1842 linear_page_index(vma, haddr));
1846 if (!PageHead(hpage))
1849 pmd = mm_find_pmd(mm, haddr);
1853 start_pte = pte_offset_map_lock(mm, pmd, haddr, &ptl);
1854 #ifdef CONFIG_FINEGRAINED_THP
1855 if (pte_cont(*start_pte)) {
1856 pte_unmap_unlock(start_pte, ptl);
1861 /* step 1: check all mapped PTEs are to the right huge page */
1862 for (i = 0, addr = haddr, pte = start_pte;
1863 #ifdef CONFIG_FINEGRAINED_THP
1868 i++, addr += PAGE_SIZE, pte++) {
1871 /* empty pte, skip */
1875 /* page swapped out, abort */
1876 if (!pte_present(*pte))
1879 page = vm_normal_page(vma, addr, *pte);
1882 * Note that uprobe, debugger, or MAP_PRIVATE may change the
1883 * page table, but the new page will not be a subpage of hpage.
1885 if (hpage + i != page)
1890 /* step 2: adjust rmap */
1891 for (i = 0, addr = haddr, pte = start_pte;
1892 #ifdef CONFIG_FINEGRAINED_THP
1897 i++, addr += PAGE_SIZE, pte++) {
1902 page = vm_normal_page(vma, addr, *pte);
1903 page_remove_rmap(page, false);
1906 pte_unmap_unlock(start_pte, ptl);
1908 /* step 3: set proper refcount and mm_counters. */
1910 page_ref_sub(hpage, count);
1911 add_mm_counter(vma->vm_mm, mm_counter_file(hpage), -count);
1914 /* step 4: collapse pmd */
1915 ptl = pmd_lock(vma->vm_mm, pmd);
1916 #ifdef CONFIG_FINEGRAINED_THP
1917 if (hpage_type == THP_TYPE_64KB) {
1918 pte_t *ptep = pte_offset_map(pmd, haddr);
1919 arch_clear_huge_pte_range(vma->vm_mm, haddr, ptep);
1922 _pmd = pmdp_collapse_flush(vma, haddr, pmd);
1925 pte_free(mm, pmd_pgtable(_pmd));
1927 #else /* CONFIG_FINEGRAINED_THP*/
1928 _pmd = pmdp_collapse_flush(vma, haddr, pmd);
1931 pte_free(mm, pmd_pgtable(_pmd));
1932 #endif /* CONFIG_FINEGRAINED_THP */
1940 pte_unmap_unlock(start_pte, ptl);
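/*
 * khugepaged_collapse_pte_mapped_thps() - flush the deferred list recorded
 * by khugepaged_add_pte_mapped_thp(): under mmap_write_trylock(), try
 * collapse_pte_mapped_thp() on each saved address, then reset the counter.
 */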
1944 static int khugepaged_collapse_pte_mapped_thps(struct mm_slot *mm_slot)
1946 struct mm_struct *mm = mm_slot->mm;
1949 if (likely(mm_slot->nr_pte_mapped_thp == 0))
1952 if (!mmap_write_trylock(mm))
1955 if (unlikely(khugepaged_test_exit(mm)))
1958 for (i = 0; i < mm_slot->nr_pte_mapped_thp; i++)
1959 collapse_pte_mapped_thp(mm, mm_slot->pte_mapped_thp[i]);
1962 mm_slot->nr_pte_mapped_thp = 0;
1963 mmap_write_unlock(mm);
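/*
 * retract_page_tables() - for every vma mapping the collapsed file range,
 * try to drop the page table that still maps the old small pages so the
 * next fault can install a huge mapping. mmap_write_trylock() is used
 * because of the page-lock vs mmap_lock ordering; addresses that cannot be
 * handled now are queued via khugepaged_add_pte_mapped_thp() for a retry.
 */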
1967 #ifdef CONFIG_FINEGRAINED_THP
1968 static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff,
1971 static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff)
1974 struct vm_area_struct *vma;
1975 struct mm_struct *mm;
1978 #ifdef CONFIG_FINEGRAINED_THP
1980 int hpage_size = (hpage_type == THP_TYPE_64KB) ?
1981 HPAGE_CONT_PTE_SIZE : HPAGE_PMD_SIZE;
1982 #endif /* CONFIG_FINEGRAINED_THP */
1984 i_mmap_lock_write(mapping);
1985 vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
1987 * Check vma->anon_vma to exclude MAP_PRIVATE mappings that
1988 * got written to. These VMAs are likely not worth investing
1989 * mmap_write_lock(mm) as PMD-mapping is likely to be split
1992 * Note that the vma->anon_vma check is racy: it can be set up after
1993 * the check but before we took mmap_lock by the fault path.
1994 * But page lock would prevent establishing any new ptes of the
1995 * page, so we are safe.
1997 * An alternative would be to drop the check, but check that the page
1998 * table is clear before calling pmdp_collapse_flush() under
1999 * ptl. It has higher chance to recover THP for the VMA, but
2000 * has higher cost too.
2004 addr = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
2005 #ifdef CONFIG_FINEGRAINED_THP
2006 if (hpage_type == THP_TYPE_64KB && addr & ~HPAGE_CONT_PTE_MASK)
2008 else if (hpage_type == THP_TYPE_2MB && addr & ~HPAGE_PMD_MASK)
2010 if (vma->vm_end < addr + hpage_size)
2014 pmd = mm_find_pmd(mm, addr);
2017 if (mmap_write_trylock(mm)) {
2018 spinlock_t *ptl = pmd_lock(mm, pmd);
2019 if (hpage_type == THP_TYPE_64KB) {
2021 ptep = pte_offset_map(pmd, addr);
2022 /* pte maps are established on page fault handling */
2023 arch_clear_huge_pte_range(mm, addr, ptep);
2028 * We need exclusive mmap_lock to retract the page table.
2030 * We use trylock due to lock inversion: we need to acquire
2031 * mmap_lock while holding the page lock. Fault path does it in
2032 * reverse order. Trylock is a way to avoid deadlock.
2034 _pmd = pmdp_collapse_flush(vma, addr, pmd);
2038 pte_free(mm, pmd_pgtable(_pmd));
2040 mmap_write_unlock(mm);
2042 khugepaged_add_pte_mapped_thp(vma->vm_mm, addr, hpage_type);
2043 #else /* CONFIG_FINEGRAINED_THP */
2044 if (addr & ~HPAGE_PMD_MASK)
2046 if (vma->vm_end < addr + HPAGE_PMD_SIZE)
2049 pmd = mm_find_pmd(mm, addr);
2053 * We need exclusive mmap_lock to retract page table.
2055 * We use trylock due to lock inversion: we need to acquire
2056 * mmap_lock while holding page lock. Fault path does it in
2057 * reverse order. Trylock is a way to avoid deadlock.
2059 if (mmap_write_trylock(mm)) {
2060 if (!khugepaged_test_exit(mm)) {
2061 spinlock_t *ptl = pmd_lock(mm, pmd);
2062 /* assume page table is clear */
2063 _pmd = pmdp_collapse_flush(vma, addr, pmd);
2066 pte_free(mm, pmd_pgtable(_pmd));
2068 mmap_write_unlock(mm);
2070 /* Try again later */
2071 khugepaged_add_pte_mapped_thp(mm, addr);
2073 #endif /* CONFIG_FINEGRAINED_THP */
2075 i_mmap_unlock_write(mapping);
2079 * collapse_file - collapse filemap/tmpfs/shmem pages into huge one.
2081 * Basic scheme is simple, details are more complex:
2082 * - allocate and lock a new huge page;
2083 * - scan page cache replacing old pages with the new one
2084 * + swap/gup in pages if necessary;
2086 * + keep old pages around in case rollback is required;
2087 * - if replacing succeeds:
2090 * + unlock huge page;
2091 * - if replacing failed;
2092 * + put all pages back and unfreeze them;
2093 * + restore gaps in the page cache;
2094 * + unlock and free huge page;
2096 #ifdef CONFIG_FINEGRAINED_THP
2097 static void collapse_file(struct mm_struct *mm,
2098 struct file *file, pgoff_t start,
2099 struct page **hpage, int node, int hpage_type)
2100 #else /* CONFIG_FINEGRAINED_THP */
2101 static void collapse_file(struct mm_struct *mm,
2102 struct file *file, pgoff_t start,
2103 struct page **hpage, int node)
2104 #endif /* CONFIG_FINEGRAINED_THP */
2106 struct address_space *mapping = file->f_mapping;
2108 struct page *new_page;
2109 #ifdef CONFIG_FINEGRAINED_THP
2110 int hpage_nr = (hpage_type == THP_TYPE_64KB ?
2111 HPAGE_CONT_PTE_NR : HPAGE_PMD_NR);
2112 int hpage_order = (hpage_type == THP_TYPE_64KB ?
2113 HPAGE_CONT_PTE_ORDER : HPAGE_PMD_ORDER);
2114 pgoff_t index, end = start + hpage_nr;
2115 #else /* CONFIG_FINEGRAINED_THP */
2116 pgoff_t index, end = start + HPAGE_PMD_NR;
2117 #endif /* CONFIG_FINEGRAINED_THP */
2118 LIST_HEAD(pagelist);
2119 #ifdef CONFIG_FINEGRAINED_THP
2120 XA_STATE_ORDER(xas, &mapping->i_pages, start, hpage_order);
2122 XA_STATE_ORDER(xas, &mapping->i_pages, start, HPAGE_PMD_ORDER);
2124 int nr_none = 0, result = SCAN_SUCCEED;
2125 bool is_shmem = shmem_file(file);
2127 VM_BUG_ON(!IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS) && !is_shmem);
2128 #ifdef CONFIG_FINEGRAINED_THP
2129 VM_BUG_ON(start & (hpage_nr - 1));
2131 VM_BUG_ON(start & (HPAGE_PMD_NR - 1));
2134 /* Only allocate from the target node */
2135 gfp = alloc_hugepage_khugepaged_gfpmask() | __GFP_THISNODE;
2137 #ifdef CONFIG_FINEGRAINED_THP
2138 new_page = khugepaged_alloc_page(hpage, gfp, node, hpage_type);
2140 new_page = khugepaged_alloc_page(hpage, gfp, node);
2143 result = SCAN_ALLOC_HUGE_PAGE_FAIL;
2147 if (unlikely(mem_cgroup_charge(new_page, mm, gfp))) {
2148 result = SCAN_CGROUP_CHARGE_FAIL;
2151 count_memcg_page_event(new_page, THP_COLLAPSE_ALLOC);
2153 /* This will be less messy when we use multi-index entries */
2156 xas_create_range(&xas);
2157 if (!xas_error(&xas))
2159 xas_unlock_irq(&xas);
2160 if (!xas_nomem(&xas, GFP_KERNEL)) {
2166 __SetPageLocked(new_page);
2168 __SetPageSwapBacked(new_page);
2169 new_page->index = start;
2170 new_page->mapping = mapping;
2173 * At this point the new_page is locked and not up-to-date.
2174 * It's safe to insert it into the page cache, because nobody would
2175 * be able to map it or use it in another way until we unlock it.
2178 xas_set(&xas, start);
2179 for (index = start; index < end; index++) {
2180 struct page *page = xas_next(&xas);
2182 VM_BUG_ON(index != xas.xa_index);
2186 * Stop if extent has been truncated or
2187 * hole-punched, and is now completely empty.
2190 if (index == start) {
2191 if (!xas_next_entry(&xas, end - 1)) {
2192 result = SCAN_TRUNCATED;
2195 xas_set(&xas, index);
2197 if (!shmem_charge(mapping->host, 1)) {
2201 xas_store(&xas, new_page);
2206 if (xa_is_value(page) || !PageUptodate(page)) {
2207 xas_unlock_irq(&xas);
2208 /* swap in or instantiate fallocated page */
2209 if (shmem_getpage(mapping->host, index, &page,
2214 } else if (trylock_page(page)) {
2216 xas_unlock_irq(&xas);
2218 result = SCAN_PAGE_LOCK;
2221 } else { /* !is_shmem */
2222 if (!page || xa_is_value(page)) {
2223 xas_unlock_irq(&xas);
2224 page_cache_sync_readahead(mapping, &file->f_ra,
2227 /* drain pagevecs to help isolate_lru_page() */
2229 page = find_lock_page(mapping, index);
2230 if (unlikely(page == NULL)) {
2234 } else if (PageDirty(page)) {
2236 * khugepaged only works on read-only fd,
2237 * so this page is dirty because it hasn't
2238 * been flushed since first write. There
2239 * won't be new dirty pages.
2241 * Trigger async flush here and hope the
2242 * writeback is done when khugepaged
2243 * revisits this page.
2245 * This is a one-off situation. We are not
2246 * forcing writeback in a loop.
2248 xas_unlock_irq(&xas);
2249 filemap_flush(mapping);
2252 } else if (PageWriteback(page)) {
2253 xas_unlock_irq(&xas);
2256 } else if (trylock_page(page)) {
2258 xas_unlock_irq(&xas);
2260 result = SCAN_PAGE_LOCK;
2266 * The page must be locked, so we can drop the i_pages lock
2267 * without racing with truncate.
2269 VM_BUG_ON_PAGE(!PageLocked(page), page);
2271 /* make sure the page is up to date */
2272 if (unlikely(!PageUptodate(page))) {
2278 * If file was truncated then extended, or hole-punched, before
2279 * we locked the first page, then a THP might be there already.
2281 if (PageTransCompound(page)) {
2282 result = SCAN_PAGE_COMPOUND;
2286 if (page_mapping(page) != mapping) {
2287 result = SCAN_TRUNCATED;
2291 if (!is_shmem && (PageDirty(page) ||
2292 PageWriteback(page))) {
2294 * khugepaged only works on read-only fd, so this
2295 * page is dirty because it hasn't been flushed
2296 * since first write.
2302 if (isolate_lru_page(page)) {
2303 result = SCAN_DEL_PAGE_LRU;
2307 if (page_has_private(page) &&
2308 !try_to_release_page(page, GFP_KERNEL)) {
2309 result = SCAN_PAGE_HAS_PRIVATE;
2310 putback_lru_page(page);
2314 if (page_mapped(page))
2315 unmap_mapping_pages(mapping, index, 1, false);
2318 xas_set(&xas, index);
2320 VM_BUG_ON_PAGE(page != xas_load(&xas), page);
2321 VM_BUG_ON_PAGE(page_mapped(page), page);
2324 * The page is expected to have page_count() == 3:
2325 * - we hold a pin on it;
2326 * - one reference from page cache;
2327 * - one from isolate_lru_page;
2329 if (!page_ref_freeze(page, 3)) {
2330 result = SCAN_PAGE_COUNT;
2331 xas_unlock_irq(&xas);
2332 putback_lru_page(page);
2337 * Add the page to the list to be able to undo the collapse if
2338 * something goes wrong.
2340 list_add_tail(&page->lru, &pagelist);
2342 /* Finally, replace with the new page. */
2343 xas_store(&xas, new_page);
2352 #ifdef CONFIG_FINEGRAINED_THP
2353 if (hpage_type == THP_TYPE_64KB)
2354 __inc_node_page_state(new_page, NR_SHMEM_64KB_THPS);
2356 __inc_node_page_state(new_page, NR_SHMEM_THPS);
2357 #else /* CONFIG_FINEGRAINED_THP */
2358 __inc_node_page_state(new_page, NR_SHMEM_THPS);
2359 #endif /* CONFIG_FINEGRAINED_THP */
2361 #ifdef CONFIG_FINEGRAINED_THP
2362 if (hpage_type == THP_TYPE_64KB)
2363 __inc_node_page_state(new_page, NR_FILE_64KB_THPS);
2365 __inc_node_page_state(new_page, NR_FILE_THPS);
2366 #else /* CONFIG_FINEGRAINED_THP */
2367 __inc_node_page_state(new_page, NR_FILE_THPS);
2368 #endif /* CONFIG_FINEGRAINED_THP */
2369 filemap_nr_thps_inc(mapping);
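/*
 * THP accounting: shmem and regular-file collapses are counted separately,
 * and the *_64KB_THPS counters appear to be additions of the
 * finegrained-THP patch for cont-PTE sized pages. filemap_nr_thps_inc()
 * marks the mapping as containing THPs so that, with
 * CONFIG_READ_ONLY_THP_FOR_FS, a later open for write knows it must get
 * rid of them.
 */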
2373 __mod_lruvec_page_state(new_page, NR_FILE_PAGES, nr_none);
2375 __mod_lruvec_page_state(new_page, NR_SHMEM, nr_none);
2379 xas_unlock_irq(&xas);
2382 if (result == SCAN_SUCCEED) {
2383 struct page *page, *tmp;
2384 #ifdef CONFIG_FINEGRAINED_THP
2389 * Replacing old pages with new one has succeeded, now we
2390 * need to copy the content and free the old pages.
2393 list_for_each_entry_safe(page, tmp, &pagelist, lru) {
2394 #ifdef CONFIG_FINEGRAINED_THP
2395 if (hpage_type != THP_TYPE_64KB) {
2396 while (index < page->index) {
2397 clear_highpage(new_page + (index % HPAGE_PMD_NR));
2402 if (hpage_type == THP_TYPE_64KB) {
2403 copy_highpage(new_page + offset, page);
2406 copy_highpage(new_page + (page->index % HPAGE_PMD_NR),
2408 #else /* CONFIG_FINEGRAINED_THP */
2409 while (index < page->index) {
2410 clear_highpage(new_page + (index % HPAGE_PMD_NR));
2413 copy_highpage(new_page + (page->index % HPAGE_PMD_NR),
2415 #endif /* CONFIG_FINEGRAINED_THP */
2416 list_del(&page->lru);
2417 page->mapping = NULL;
2418 page_ref_unfreeze(page, 1);
2419 ClearPageActive(page);
2420 ClearPageUnevictable(page);
2425 #ifdef CONFIG_FINEGRAINED_THP
2426 if (hpage_type == THP_TYPE_64KB) {
2427 while (index < end) {
2428 clear_highpage(new_page + offset);
2433 while (index < end) {
2434 clear_highpage(new_page + (index % HPAGE_PMD_NR));
2438 #else /* CONFIG_FINEGRAINED_THP */
2439 while (index < end) {
2440 clear_highpage(new_page + (index % HPAGE_PMD_NR));
2443 #endif /* CONFIG_FINEGRAINED_THP */
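/*
 * Each old page is copied into the compound page at its index offset and
 * then released; indices with no old page (the nr_none holes) and the tail
 * of the range are zero-filled with clear_highpage(). For THP_TYPE_64KB
 * the code appears to use an `offset` cursor into the 16-page compound
 * page instead of `index % HPAGE_PMD_NR`.
 */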
2445 SetPageUptodate(new_page);
2446 #ifdef CONFIG_FINEGRAINED_THP
2447 page_ref_add(new_page, hpage_nr - 1);
2449 page_ref_add(new_page, HPAGE_PMD_NR - 1);
2452 set_page_dirty(new_page);
2453 lru_cache_add(new_page);
2456 * Remove pte page tables, so we can re-fault the page as huge.
2458 #ifdef CONFIG_FINEGRAINED_THP
2459 retract_page_tables(mapping, start, hpage_type);
2460 if (hpage_type == THP_TYPE_2MB)
2462 #else /* CONFIG_FINEGRAINED_THP */
2463 retract_page_tables(mapping, start);
2465 #endif /* CONFIG_FINEGRAINED_THP */
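/*
 * With the page cache now pointing at the compound page, the old PTE page
 * tables covering the range are withdrawn so that the next fault can map
 * it huge: as a PMD for THP_TYPE_2MB, or presumably as a contiguous-PTE
 * mapping for THP_TYPE_64KB.
 */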
2466 khugepaged_pages_collapsed++;
2470 /* Something went wrong: roll back page cache changes */
2472 mapping->nrpages -= nr_none;
2475 shmem_uncharge(mapping->host, nr_none);
2477 xas_set(&xas, start);
2478 xas_for_each(&xas, page, end - 1) {
2479 page = list_first_entry_or_null(&pagelist,
2481 if (!page || xas.xa_index < page->index) {
2485 /* Put holes back where they were */
2486 xas_store(&xas, NULL);
2490 VM_BUG_ON_PAGE(page->index != xas.xa_index, page);
2492 /* Unfreeze the page. */
2493 list_del(&page->lru);
2494 page_ref_unfreeze(page, 2);
2495 xas_store(&xas, page);
2497 xas_unlock_irq(&xas);
2499 putback_lru_page(page);
2503 xas_unlock_irq(&xas);
2505 new_page->mapping = NULL;
2508 unlock_page(new_page);
2510 #ifdef CONFIG_FINEGRAINED_THP
2511 if (result != SCAN_SUCCEED && new_page && hpage_type == THP_TYPE_64KB)
2514 VM_BUG_ON(!list_empty(&pagelist));
2515 if (!IS_ERR_OR_NULL(*hpage))
2516 mem_cgroup_uncharge(*hpage);
2517 /* TODO: tracepoints */
2520 #ifdef CONFIG_FINEGRAINED_THP
2521 static void khugepaged_scan_file(struct mm_struct *mm,
2522 struct file *file, pgoff_t start, struct page **hpage,
2524 #else /* CONFIG_FINEGRAINED_THP */
2525 static void khugepaged_scan_file(struct mm_struct *mm,
2526 struct file *file, pgoff_t start, struct page **hpage)
2527 #endif /* CONFIG_FINEGRAINED_THP */
2529 struct page *page = NULL;
2530 struct address_space *mapping = file->f_mapping;
2531 XA_STATE(xas, &mapping->i_pages, start);
2533 int node = NUMA_NO_NODE;
2534 int result = SCAN_SUCCEED;
2535 #ifdef CONFIG_FINEGRAINED_THP
2537 int max_ptes_swap, max_ptes_none, max_ptes_shared;
2539 if (hpage_type == THP_TYPE_64KB) {
2540 hpage_nr = HPAGE_CONT_PTE_NR; /* 64KB */
2541 max_ptes_swap = khugepaged_max_ptes_swap_64kb;
2542 max_ptes_none = khugepaged_max_ptes_none_64kb;
2543 max_ptes_shared = khugepaged_max_ptes_shared_64kb;
2545 hpage_nr = HPAGE_PMD_NR; /* 2MB */
2546 max_ptes_swap = khugepaged_max_ptes_swap;
2547 max_ptes_none = khugepaged_max_ptes_none;
2548 max_ptes_shared = khugepaged_max_ptes_shared;
2550 #endif /* CONFIG_FINEGRAINED_THP */
2554 memset(khugepaged_node_load, 0, sizeof(khugepaged_node_load));
2556 #ifdef CONFIG_FINEGRAINED_THP
2557 xas_for_each(&xas, page, start + hpage_nr - 1)
2559 xas_for_each(&xas, page, start + HPAGE_PMD_NR - 1)
2562 if (xas_retry(&xas, page))
2565 if (xa_is_value(page)) {
2566 #ifdef CONFIG_FINEGRAINED_THP
2567 if (++swap > max_ptes_swap)
2569 if (++swap > khugepaged_max_ptes_swap)
2572 result = SCAN_EXCEED_SWAP_PTE;
2578 if (PageTransCompound(page)) {
2579 result = SCAN_PAGE_COMPOUND;
2583 node = page_to_nid(page);
2584 if (khugepaged_scan_abort(node)) {
2585 result = SCAN_SCAN_ABORT;
2588 khugepaged_node_load[node]++;
2590 if (!PageLRU(page)) {
2591 result = SCAN_PAGE_LRU;
2595 if (page_count(page) !=
2596 1 + page_mapcount(page) + page_has_private(page)) {
2597 result = SCAN_PAGE_COUNT;
2602 * We probably should check if the page is referenced here, but
2603 * nobody would transfer pte_young() to PageReferenced() for us.
2604 * And rmap walk here is just too costly...
2609 if (need_resched()) {
2616 if (result == SCAN_SUCCEED) {
2617 #ifdef CONFIG_FINEGRAINED_THP
2618 if (present < hpage_nr - max_ptes_none)
2620 if (present < HPAGE_PMD_NR - khugepaged_max_ptes_none)
2623 result = SCAN_EXCEED_NONE_PTE;
2625 node = khugepaged_find_target_node();
2626 #ifdef CONFIG_FINEGRAINED_THP
2627 collapse_file(mm, file, start, hpage, node, hpage_type);
2629 collapse_file(mm, file, start, hpage, node);
2634 /* TODO: tracepoints */
2637 #ifdef CONFIG_FINEGRAINED_THP
2638 static void khugepaged_scan_file(struct mm_struct *mm,
2639 struct file *file, pgoff_t start, struct page **hpage,
2641 #else /* CONFIG_FINEGRAINED_THP */
2642 static void khugepaged_scan_file(struct mm_struct *mm,
2643 struct file *file, pgoff_t start, struct page **hpage)
2644 #endif /* CONFIG_FINEGRAINED_THP */
2649 static int khugepaged_collapse_pte_mapped_thps(struct mm_slot *mm_slot)
2655 #ifdef CONFIG_FINEGRAINED_THP
2657 * If the return value is greater than THP_TYPE_FAIL, the vma can host a
2658 * hugepage and the computed hugepage start/end are returned through the
2659 * hstart/hend pointers; otherwise the vma cannot host a hugepage.
2661 static inline int hugepage_determine_htype(unsigned long vm_start,
2662 unsigned long vm_end, unsigned long *hstart, unsigned long *hend) {
2663 unsigned long start, end;
2665 /* determine 2MB hugepage */
2666 start = (vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
2667 end = vm_end & HPAGE_PMD_MASK;
2669 /* determine 64KB hugepage */
2670 start = (vm_start + ~HPAGE_CONT_PTE_MASK) & HPAGE_CONT_PTE_MASK;
2671 end = vm_end & HPAGE_CONT_PTE_MASK;
2673 return THP_TYPE_FAIL;
2676 return THP_TYPE_64KB;
2680 return THP_TYPE_2MB;
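/*
 * A minimal sketch of the alignment arithmetic used above (the helper name
 * is illustrative and not part of this file). `mask` stands in for
 * HPAGE_PMD_MASK or HPAGE_CONT_PTE_MASK. For example, with
 * vm_start = 0x10000 and vm_end = 0x500000 under a 2MB mask, the aligned
 * window is [0x200000, 0x400000), so one 2MB hugepage fits.
 */
static inline bool hugepage_range_fits(unsigned long vm_start,
				       unsigned long vm_end,
				       unsigned long mask,
				       unsigned long hpage_size)
{
	unsigned long start = (vm_start + ~mask) & mask;	/* round up */
	unsigned long end = vm_end & mask;			/* round down */

	return end >= start + hpage_size;
}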
2684 KHUGEPAGE_SCAN_CONTINUE,
2685 KHUGEPAGE_SCAN_BREAK,
2686 KHUGEPAGE_SCAN_BREAK_MMAP_LOCK,
2689 static unsigned int khugepaged_scan_vma(struct mm_struct *mm,
2690 struct vm_area_struct *vma, struct page **hpage,
2691 unsigned int pages, int *progress)
2693 unsigned long hstart, hend;
2694 int hpage_type, ret;
2695 int hpage_size, hpage_nr;
2697 if (!hugepage_vma_check(vma, vma->vm_flags))
2698 return KHUGEPAGE_SCAN_CONTINUE;
2700 hpage_type = hugepage_determine_htype(
2701 (vma->vm_start > khugepaged_scan.address) ?
2702 vma->vm_start : khugepaged_scan.address,
2703 vma->vm_end, &hstart, &hend);
2705 if (hpage_type == THP_TYPE_FAIL)
2706 return KHUGEPAGE_SCAN_CONTINUE;
2707 if (khugepaged_scan.address > hend)
2708 return KHUGEPAGE_SCAN_CONTINUE;
2709 if (khugepaged_scan.address < hstart)
2710 khugepaged_scan.address = hstart;
2712 if (hpage_type == THP_TYPE_64KB) {
2713 VM_BUG_ON(khugepaged_scan.address & ~HPAGE_CONT_PTE_MASK);
2714 hpage_size = HPAGE_CONT_PTE_SIZE; /* 64KB */
2715 hpage_nr = HPAGE_CONT_PTE_NR;
2716 } else if (hpage_type == THP_TYPE_2MB) {
2717 VM_BUG_ON(khugepaged_scan.address & ~HPAGE_PMD_MASK);
2718 hpage_size = HPAGE_PMD_SIZE; /* 2MB */
2719 hpage_nr = HPAGE_PMD_NR;
2720 if (IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS) && vma->vm_file &&
2721 !IS_ALIGNED((vma->vm_start >> PAGE_SHIFT) - vma->vm_pgoff,
2723 /* fallback, vma or file not aligned to 2MB */
2724 hpage_size = HPAGE_CONT_PTE_SIZE; /* 64KB */
2725 hpage_nr = HPAGE_CONT_PTE_NR;
2726 hpage_type = THP_TYPE_64KB;
2731 while (khugepaged_scan.address < hend) {
2732 if (khugepaged_scan.address + hpage_size > hend) {
2733 if (khugepaged_scan.address + HPAGE_CONT_PTE_SIZE < hend) {
2734 hpage_size = HPAGE_CONT_PTE_SIZE;
2735 hpage_nr = HPAGE_CONT_PTE_NR;
2736 hpage_type = THP_TYPE_64KB;
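/*
 * If the remaining span up to hend is too small for the current hugepage
 * size but still holds a 64KB unit, the scan appears to downgrade to
 * THP_TYPE_64KB so the tail of the vma can still be collapsed with
 * cont-PTE pages.
 */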
2741 if (unlikely(khugepaged_test_exit(mm)))
2742 return KHUGEPAGE_SCAN_BREAK;
2744 VM_BUG_ON(khugepaged_scan.address < hstart ||
2745 khugepaged_scan.address + hpage_size >
2747 if (IS_ENABLED(CONFIG_SHMEM) && vma->vm_file) {
2748 struct file *file = get_file(vma->vm_file);
2749 pgoff_t pgoff = linear_page_index(vma,
2750 khugepaged_scan.address);
2752 mmap_read_unlock(mm);
2754 khugepaged_scan_file(mm, file, pgoff, hpage, hpage_type);
2757 ret = khugepaged_scan_pmd(mm, vma,
2758 khugepaged_scan.address,
2761 /* move to next address */
2762 khugepaged_scan.address += hpage_size;
2763 *progress += hpage_nr;
2765 /* we released mmap_lock so break loop */
2766 return KHUGEPAGE_SCAN_BREAK_MMAP_LOCK;
2767 if (*progress >= pages)
2768 return KHUGEPAGE_SCAN_BREAK;
2770 return KHUGEPAGE_SCAN_CONTINUE;
2773 static struct thp_scan_hint *find_scan_hint(struct mm_slot *slot,
2776 struct thp_scan_hint *hint;
2778 list_for_each_entry(hint, &khugepaged_scan.hint_list, hint_list) {
2779 if (hint->slot == slot)
2785 #ifdef CONFIG_THP_CONSERVATIVE
2786 /* caller must hold a proper mmap_lock */
2787 void khugepaged_mem_hook(struct mm_struct *mm, unsigned long addr,
2788 long diff, const char *debug)
2790 struct mm_slot *slot;
2791 struct vm_area_struct *vma;
2792 struct thp_scan_hint *hint;
2793 bool wakeup = false;
2796 vma = find_vma(mm, addr);
2797 if (!hugepage_vma_check(vma, vma->vm_flags))
2801 spin_lock(&khugepaged_mm_lock);
2802 slot = get_mm_slot(mm);
2804 /* no mm_slot yet: create one via __khugepaged_enter(), or bail out */
2805 spin_unlock(&khugepaged_mm_lock);
2808 if (__khugepaged_enter(mm))
2814 hint = find_scan_hint(slot, addr);
2816 spin_unlock(&khugepaged_mm_lock);
2817 hint = kzalloc(sizeof(struct thp_scan_hint), GFP_KERNEL);
2821 hint->jiffies = jiffies;
2822 spin_lock(&khugepaged_mm_lock);
2823 list_add(&hint->hint_list, &khugepaged_scan.hint_list);
2824 khugepaged_scan.nr_hint++;
2827 if (hint->diff >= HPAGE_CONT_PTE_SIZE) {
2829 //list_move(&hint->hint_list, &khugepaged_scan.hint_list);
2831 spin_unlock(&khugepaged_mm_lock);
2833 /* if possible, wake khugepaged up to start a scan */
2835 wake_up_interruptible(&khugepaged_wait);
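/*
 * The hint mechanism above is specific to this conservative-THP patch: a
 * caller that has just grown an mm's mapped memory presumably invokes
 * khugepaged_mem_hook(mm, addr, diff, __func__) (call sites are not shown
 * in this file), the hint's diff accumulates the change, and once it
 * reaches HPAGE_CONT_PTE_SIZE (64KB) khugepaged is woken so the hinted mm
 * is scanned ahead of the regular round-robin order.
 */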
2838 #else /* CONFIG_THP_CONSERVATIVE */
2839 void khugepaged_mem_hook(struct mm_struct *mm,
2840 unsigned long addr, long diff, const char *debug)
2842 #endif /* CONFIG_THP_CONSERVATIVE */
2844 static void clear_hint_list(struct mm_slot *slot)
2846 struct thp_scan_hint *hint;
2847 hint = find_scan_hint(slot, 0);
2849 list_del(&hint->hint_list);
2851 khugepaged_scan.nr_hint--;
2855 static struct thp_scan_hint *get_next_hint(void)
2857 if (!list_empty(&khugepaged_scan.hint_list)) {
2858 struct thp_scan_hint *hint = list_first_entry(
2859 &khugepaged_scan.hint_list,
2860 struct thp_scan_hint, hint_list);
2861 list_del(&hint->hint_list);
2862 khugepaged_scan.nr_hint--;
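/*
 * get_next_hint() pops the oldest hint (FIFO order) and drops it from the
 * bookkeeping; the elided lines presumably return the hint, or NULL when
 * the list is empty, with the caller responsible for freeing it.
 */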
2867 #endif /* CONFIG_FINEGRAINED_THP */
2869 static unsigned int khugepaged_scan_mm_slot(unsigned int pages,
2870 struct page **hpage)
2871 __releases(&khugepaged_mm_lock)
2872 __acquires(&khugepaged_mm_lock)
2874 struct mm_slot *mm_slot;
2875 struct mm_struct *mm;
2876 struct vm_area_struct *vma;
2880 lockdep_assert_held(&khugepaged_mm_lock);
2882 #ifdef CONFIG_FINEGRAINED_THP
2883 if (khugepaged_scan.mm_slot)
2884 mm_slot = khugepaged_scan.mm_slot;
2885 else if (!list_empty(&khugepaged_scan.hint_list)) {
2886 struct thp_scan_hint *hint;
2888 unsigned long jiffies_diff;
2891 hint = get_next_hint();
2895 mm_slot = hint->slot;
2896 mem_diff = hint->diff;
2897 jiffies_diff = jiffies - hint->jiffies;
2899 clear_hint_list(mm_slot);
2901 if (khugepaged_test_exit(mm_slot->mm))
2903 khugepaged_scan.address = 0;
2904 khugepaged_scan.mm_slot = mm_slot;
2907 mm_slot = list_entry(khugepaged_scan.mm_head.next,
2908 struct mm_slot, mm_node);
2909 clear_hint_list(mm_slot);
2910 khugepaged_scan.address = 0;
2911 khugepaged_scan.mm_slot = mm_slot;
2913 #else /* CONFIG_FINEGRAINED_THP */
2914 if (khugepaged_scan.mm_slot)
2915 mm_slot = khugepaged_scan.mm_slot;
2917 mm_slot = list_entry(khugepaged_scan.mm_head.next,
2918 struct mm_slot, mm_node);
2919 khugepaged_scan.address = 0;
2920 khugepaged_scan.mm_slot = mm_slot;
2922 #endif /* CONFIG_FINEGRAINED_THP */
2923 spin_unlock(&khugepaged_mm_lock);
2924 khugepaged_collapse_pte_mapped_thps(mm_slot);
2928 * Don't wait for the mmap lock (to avoid long wait times). Just move to
2929 * the next mm on the list.
2932 if (unlikely(!mmap_read_trylock(mm)))
2933 goto breakouterloop_mmap_lock;
2934 if (likely(!khugepaged_test_exit(mm)))
2935 vma = find_vma(mm, khugepaged_scan.address);
2938 for (; vma; vma = vma->vm_next) {
2939 #ifdef CONFIG_FINEGRAINED_THP
2942 unsigned long hstart, hend;
2946 if (unlikely(khugepaged_test_exit(mm))) {
2950 #ifdef CONFIG_FINEGRAINED_THP
2951 ret = khugepaged_scan_vma(mm, vma, hpage, pages, &progress);
2953 if (ret == KHUGEPAGE_SCAN_CONTINUE) {
2956 } else if (ret == KHUGEPAGE_SCAN_BREAK)
2957 goto breakouterloop;
2958 else if (ret == KHUGEPAGE_SCAN_BREAK_MMAP_LOCK)
2959 goto breakouterloop_mmap_lock;
2960 #else /* CONFIG_FINEGRAINED_THP */
2961 if (!hugepage_vma_check(vma, vma->vm_flags)) {
2966 hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
2967 hend = vma->vm_end & HPAGE_PMD_MASK;
2970 if (khugepaged_scan.address > hend)
2972 if (khugepaged_scan.address < hstart)
2973 khugepaged_scan.address = hstart;
2974 VM_BUG_ON(khugepaged_scan.address & ~HPAGE_PMD_MASK);
2975 if (shmem_file(vma->vm_file) && !shmem_huge_enabled(vma))
2978 while (khugepaged_scan.address < hend) {
2981 if (unlikely(khugepaged_test_exit(mm)))
2982 goto breakouterloop;
2984 VM_BUG_ON(khugepaged_scan.address < hstart ||
2985 khugepaged_scan.address + HPAGE_PMD_SIZE >
2987 if (IS_ENABLED(CONFIG_SHMEM) && vma->vm_file) {
2988 struct file *file = get_file(vma->vm_file);
2989 pgoff_t pgoff = linear_page_index(vma,
2990 khugepaged_scan.address);
2992 mmap_read_unlock(mm);
2994 khugepaged_scan_file(mm, file, pgoff, hpage);
2997 ret = khugepaged_scan_pmd(mm, vma,
2998 khugepaged_scan.address,
3001 /* move to next address */
3002 khugepaged_scan.address += HPAGE_PMD_SIZE;
3003 progress += HPAGE_PMD_NR;
3005 /* we released mmap_lock so break loop */
3006 goto breakouterloop_mmap_lock;
3007 if (progress >= pages)
3008 goto breakouterloop;
3010 #endif /* CONFIG_FINEGRAINED_THP */
3013 mmap_read_unlock(mm); /* exit_mmap will destroy ptes after this */
3014 breakouterloop_mmap_lock:
3016 spin_lock(&khugepaged_mm_lock);
3017 VM_BUG_ON(khugepaged_scan.mm_slot != mm_slot);
3019 * Release the current mm_slot if this mm is about to die, or
3020 * if we scanned all vmas of this mm.
3022 if (khugepaged_test_exit(mm) || !vma) {
3024 * Make sure that if mm_users is reaching zero while
3025 * khugepaged runs here, khugepaged_exit will find
3026 * mm_slot not pointing to the exiting mm.
3028 #ifdef CONFIG_FINEGRAINED_THP
3029 if (!list_empty(&khugepaged_scan.hint_list)) {
3030 unsigned long jiffies_diff;
3032 struct thp_scan_hint *hint;
3033 struct mm_slot *next_slot;
3036 hint = get_next_hint();
3040 if (mm_slot->mm_node.next != &khugepaged_scan.mm_head)
3041 goto get_next_slot2;
3046 mem_diff = hint->diff;
3047 jiffies_diff = jiffies - hint->jiffies;
3048 next_slot = hint->slot;
3051 if (next_slot == mm_slot)
3052 goto get_next_hint2;
3054 if (!khugepaged_test_exit(next_slot->mm)) {
3055 list_move(&next_slot->mm_node, &mm_slot->mm_node);
3056 clear_hint_list(next_slot);
3058 goto get_next_hint2;
3060 khugepaged_scan.mm_slot = next_slot;
3061 khugepaged_scan.address = 0;
3062 } else if (mm_slot->mm_node.next != &khugepaged_scan.mm_head) {
3064 khugepaged_scan.mm_slot = list_entry(
3065 mm_slot->mm_node.next,
3066 struct mm_slot, mm_node);
3067 clear_hint_list(khugepaged_scan.mm_slot);
3068 khugepaged_scan.address = 0;
3071 khugepaged_scan.mm_slot = NULL;
3072 khugepaged_full_scans++;
3074 #else /* CONFIG_FINEGRAINED_THP */
3075 if (mm_slot->mm_node.next != &khugepaged_scan.mm_head) {
3076 khugepaged_scan.mm_slot = list_entry(
3077 mm_slot->mm_node.next,
3078 struct mm_slot, mm_node);
3079 khugepaged_scan.address = 0;
3081 khugepaged_scan.mm_slot = NULL;
3082 khugepaged_full_scans++;
3084 #endif /* CONFIG_FINEGRAINED_THP */
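/*
 * At this point the cursor has been advanced: with CONFIG_FINEGRAINED_THP
 * the next hinted mm (if any, and not the one just finished) is moved
 * directly after the current slot and scanned next; otherwise the next
 * mm_slot on the list is taken. When the list is exhausted, mm_slot is
 * reset to NULL and khugepaged_full_scans is bumped so the next pass
 * starts over from the head.
 */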
3085 collect_mm_slot(mm_slot);
3091 static int khugepaged_has_work(void)
3093 return !list_empty(&khugepaged_scan.mm_head) &&
3094 khugepaged_enabled();
3097 static int khugepaged_wait_event(void)
3099 return !list_empty(&khugepaged_scan.mm_head) ||
3100 kthread_should_stop();
3103 static void khugepaged_do_scan(void)
3105 struct page *hpage = NULL;
3106 unsigned int progress = 0, pass_through_head = 0;
3107 unsigned int pages = khugepaged_pages_to_scan;
3110 barrier(); /* write khugepaged_pages_to_scan to local stack */
3112 lru_add_drain_all();
3114 while (progress < pages) {
3115 if (!khugepaged_prealloc_page(&hpage, &wait))
3120 if (unlikely(kthread_should_stop() || try_to_freeze()))
3123 spin_lock(&khugepaged_mm_lock);
3124 if (!khugepaged_scan.mm_slot)
3125 pass_through_head++;
3126 if (khugepaged_has_work() &&
3127 pass_through_head < 2)
3128 progress += khugepaged_scan_mm_slot(pages - progress,
3132 spin_unlock(&khugepaged_mm_lock);
3135 if (!IS_ERR_OR_NULL(hpage))
3139 static bool khugepaged_should_wakeup(void)
3141 return kthread_should_stop() ||
3142 time_after_eq(jiffies, khugepaged_sleep_expire);
3145 static void khugepaged_wait_work(void)
3147 if (khugepaged_has_work()) {
3148 const unsigned long scan_sleep_jiffies =
3149 msecs_to_jiffies(khugepaged_scan_sleep_millisecs);
3151 if (!scan_sleep_jiffies)
3154 khugepaged_sleep_expire = jiffies + scan_sleep_jiffies;
3155 wait_event_freezable_timeout(khugepaged_wait,
3156 khugepaged_should_wakeup(),
3157 scan_sleep_jiffies);
3161 if (khugepaged_enabled())
3162 wait_event_freezable(khugepaged_wait, khugepaged_wait_event());
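/*
 * Two sleep modes: a timed, freezable sleep between passes while work is
 * pending (scan_sleep_millisecs, 10s by default at the top of this file),
 * and an indefinite freezable wait otherwise, ended via khugepaged_wait
 * when an mm gets registered. khugepaged_sleep_expire lets an early wakeup
 * (e.g. a sysfs tunable change resetting it) cut the timed sleep short
 * through khugepaged_should_wakeup().
 */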
3165 #include <linux/delay.h>
3166 bool eager_allocation = false;
3168 static int khugepaged(void *none)
3170 struct mm_slot *mm_slot;
3173 set_user_nice(current, MAX_NICE);
3175 while (!kthread_should_stop()) {
3176 khugepaged_do_scan();
3177 khugepaged_wait_work();
3180 spin_lock(&khugepaged_mm_lock);
3181 mm_slot = khugepaged_scan.mm_slot;
3182 khugepaged_scan.mm_slot = NULL;
3184 collect_mm_slot(mm_slot);
3185 spin_unlock(&khugepaged_mm_lock);
3189 static void set_recommended_min_free_kbytes(void)
3193 unsigned long recommended_min;
3195 for_each_populated_zone(zone) {
3197 * We don't need to worry about fragmentation of
3198 * ZONE_MOVABLE since it only has movable pages.
3200 if (zone_idx(zone) > gfp_zone(GFP_USER))
3206 /* Ensure 2 pageblocks are free to assist fragmentation avoidance */
3207 recommended_min = pageblock_nr_pages * nr_zones * 2;
3210 * Make sure that on average at least two pageblocks are almost free
3211 * of another type, one for a migratetype to fall back to and a
3212 * second to avoid subsequent fallbacks of other types. There are 3
3213 * MIGRATE_TYPES we care about.
3215 recommended_min += pageblock_nr_pages * nr_zones *
3216 MIGRATE_PCPTYPES * MIGRATE_PCPTYPES;
3218 /* never allow reserving more than 5% of lowmem */
3219 recommended_min = min(recommended_min,
3220 (unsigned long) nr_free_buffer_pages() / 20);
3221 recommended_min <<= (PAGE_SHIFT-10);
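/*
 * Worked example, assuming 4KB pages, 2MB pageblocks
 * (pageblock_nr_pages = 512), MIGRATE_PCPTYPES = 3 and two populated zones
 * below ZONE_MOVABLE: 512 * 2 * 2 + 512 * 2 * 3 * 3 = 11264 pages, and the
 * shift by (PAGE_SHIFT - 10) converts pages to kB, giving 45056 kB
 * (~44 MB) before the 5%-of-lowmem cap below.
 */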
3223 if (recommended_min > min_free_kbytes) {
3224 if (user_min_free_kbytes >= 0)
3225 pr_info("raising min_free_kbytes from %d to %lu to help transparent hugepage allocations\n",
3226 min_free_kbytes, recommended_min);
3228 min_free_kbytes = recommended_min;
3230 setup_per_zone_wmarks();
3233 int start_stop_khugepaged(void)
3237 mutex_lock(&khugepaged_mutex);
3238 if (khugepaged_enabled()) {
3239 if (!khugepaged_thread)
3240 khugepaged_thread = kthread_run(khugepaged, NULL,
3242 if (IS_ERR(khugepaged_thread)) {
3243 pr_err("khugepaged: kthread_run(khugepaged) failed\n");
3244 err = PTR_ERR(khugepaged_thread);
3245 khugepaged_thread = NULL;
3249 if (!list_empty(&khugepaged_scan.mm_head))
3250 wake_up_interruptible(&khugepaged_wait);
3252 set_recommended_min_free_kbytes();
3253 } else if (khugepaged_thread) {
3254 kthread_stop(khugepaged_thread);
3255 khugepaged_thread = NULL;
3258 mutex_unlock(&khugepaged_mutex);
3262 void khugepaged_min_free_kbytes_update(void)
3264 mutex_lock(&khugepaged_mutex);
3265 if (khugepaged_enabled() && khugepaged_thread)
3266 set_recommended_min_free_kbytes();
3267 mutex_unlock(&khugepaged_mutex);