1 // SPDX-License-Identifier: GPL-2.0
2 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
5 #include <linux/sched.h>
6 #include <linux/sched/mm.h>
7 #include <linux/sched/coredump.h>
8 #include <linux/mmu_notifier.h>
9 #include <linux/rmap.h>
10 #include <linux/swap.h>
11 #include <linux/mm_inline.h>
12 #include <linux/kthread.h>
13 #include <linux/khugepaged.h>
14 #include <linux/freezer.h>
15 #include <linux/mman.h>
16 #include <linux/hashtable.h>
17 #include <linux/userfaultfd_k.h>
18 #include <linux/page_idle.h>
19 #include <linux/swapops.h>
20 #include <linux/shmem_fs.h>
23 #include <asm/pgalloc.h>
24 #ifdef CONFIG_FINEGRAINED_THP
25 #include <asm/finegrained_thp.h>
26 #include <asm/huge_mm.h>
28 #include <asm-generic/finegrained_thp.h>
29 #include <asm-generic/huge_mm.h>
39 SCAN_EXCEED_SHARED_PTE,
43 SCAN_LACK_REFERENCED_PAGE,
57 SCAN_ALLOC_HUGE_PAGE_FAIL,
58 SCAN_CGROUP_CHARGE_FAIL,
60 SCAN_PAGE_HAS_PRIVATE,
63 #define CREATE_TRACE_POINTS
64 #include <trace/events/huge_memory.h>
66 static struct task_struct *khugepaged_thread __read_mostly;
67 static DEFINE_MUTEX(khugepaged_mutex);
69 /* default: scan 8*512 ptes (or vmas) every 10 seconds */
70 static unsigned int khugepaged_pages_to_scan __read_mostly;
71 static unsigned int khugepaged_pages_collapsed;
72 static unsigned int khugepaged_full_scans;
73 static unsigned int khugepaged_scan_sleep_millisecs __read_mostly = 10000;
74 /* during fragmentation poll the hugepage allocator once every minute */
75 static unsigned int khugepaged_alloc_sleep_millisecs __read_mostly = 60000;
76 static unsigned long khugepaged_sleep_expire;
77 static DEFINE_SPINLOCK(khugepaged_mm_lock);
78 static DECLARE_WAIT_QUEUE_HEAD(khugepaged_wait);
80 * default collapse hugepages if there is at least one pte mapped like
81 * it would have happened if the vma was large enough during page fault.
84 static unsigned int khugepaged_max_ptes_none __read_mostly;
85 static unsigned int khugepaged_max_ptes_swap __read_mostly;
86 static unsigned int khugepaged_max_ptes_shared __read_mostly;
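/*
 * The three limits above bound what khugepaged tolerates inside a candidate
 * region during a scan: max_ptes_none caps empty/zero ptes, max_ptes_swap
 * caps swapped-out ptes, and max_ptes_shared caps ptes mapping pages shared
 * with another mm.  Exceeding any of them aborts the scan with the
 * corresponding SCAN_EXCEED_* result.
 */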
88 #ifdef CONFIG_FINEGRAINED_THP
91 * it is used to provide hints to khugepaged about
92 * which address space has changed recently.
94 struct thp_scan_hint {
96 struct vm_area_struct *vma;
97 unsigned long diff; /* memory difference */
98 unsigned long jiffies; /* time stamp for profiling purpose */
99 struct list_head hint_list;
102 /* THP type descriptor */
104 THP_TYPE_FAIL, /* cannot make hugepage */
105 THP_TYPE_64KB, /* 64KB hugepage can be made, use CONT_PTE */
106 THP_TYPE_2MB, /* 2MB hugepage can be made, use PMD */
109 static unsigned int khugepaged_max_ptes_none_64kb __read_mostly;
110 static unsigned int khugepaged_max_ptes_swap_64kb __read_mostly;
111 static unsigned int khugepaged_max_ptes_shared_64kb __read_mostly;
112 #endif /* CONFIG_FINEGRAINED_THP */
114 #define MM_SLOTS_HASH_BITS 10
115 static __read_mostly DEFINE_HASHTABLE(mm_slots_hash, MM_SLOTS_HASH_BITS);
117 static struct kmem_cache *mm_slot_cache __read_mostly;
119 #define MAX_PTE_MAPPED_THP 8
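/*
 * Each mm_slot can remember up to MAX_PTE_MAPPED_THP addresses of file THPs
 * that are still mapped by ordinary ptes; khugepaged revisits them later
 * (khugepaged_collapse_pte_mapped_thps) and retracts the page table so the
 * THP can be refaulted as a huge mapping.
 */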
122 * struct mm_slot - hash lookup from mm to mm_slot
123 * @hash: hash collision list
124 * @mm_node: khugepaged scan list headed in khugepaged_scan.mm_head
125 * @mm: the mm that this information is valid for
128 struct hlist_node hash;
129 struct list_head mm_node;
130 struct mm_struct *mm;
132 /* pte-mapped THP in this mm */
133 int nr_pte_mapped_thp;
134 unsigned long pte_mapped_thp[MAX_PTE_MAPPED_THP];
138 * struct khugepaged_scan - cursor for scanning
139 * @mm_head: the head of the mm list to scan
140 * @mm_slot: the current mm_slot we are scanning
141 * @address: the next address inside that to be scanned
143 * There is only one khugepaged_scan instance of this cursor structure.
145 struct khugepaged_scan {
146 struct list_head mm_head;
147 struct mm_slot *mm_slot;
148 unsigned long address;
149 #ifdef CONFIG_FINEGRAINED_THP
152 struct list_head hint_list;
153 #endif /* CONFIG_FINEGRAINED_THP */
156 static struct khugepaged_scan khugepaged_scan = {
157 .mm_head = LIST_HEAD_INIT(khugepaged_scan.mm_head),
158 #ifdef CONFIG_FINEGRAINED_THP
159 .hint_list = LIST_HEAD_INIT(khugepaged_scan.hint_list),
164 static ssize_t scan_sleep_millisecs_show(struct kobject *kobj,
165 struct kobj_attribute *attr,
168 return sprintf(buf, "%u\n", khugepaged_scan_sleep_millisecs);
171 static ssize_t scan_sleep_millisecs_store(struct kobject *kobj,
172 struct kobj_attribute *attr,
173 const char *buf, size_t count)
178 err = kstrtoul(buf, 10, &msecs);
179 if (err || msecs > UINT_MAX)
182 khugepaged_scan_sleep_millisecs = msecs;
183 khugepaged_sleep_expire = 0;
184 wake_up_interruptible(&khugepaged_wait);
188 static struct kobj_attribute scan_sleep_millisecs_attr =
189 __ATTR(scan_sleep_millisecs, 0644, scan_sleep_millisecs_show,
190 scan_sleep_millisecs_store);
192 static ssize_t alloc_sleep_millisecs_show(struct kobject *kobj,
193 struct kobj_attribute *attr,
196 return sprintf(buf, "%u\n", khugepaged_alloc_sleep_millisecs);
199 static ssize_t alloc_sleep_millisecs_store(struct kobject *kobj,
200 struct kobj_attribute *attr,
201 const char *buf, size_t count)
206 err = kstrtoul(buf, 10, &msecs);
207 if (err || msecs > UINT_MAX)
210 khugepaged_alloc_sleep_millisecs = msecs;
211 khugepaged_sleep_expire = 0;
212 wake_up_interruptible(&khugepaged_wait);
216 static struct kobj_attribute alloc_sleep_millisecs_attr =
217 __ATTR(alloc_sleep_millisecs, 0644, alloc_sleep_millisecs_show,
218 alloc_sleep_millisecs_store);
220 static ssize_t pages_to_scan_show(struct kobject *kobj,
221 struct kobj_attribute *attr,
224 return sprintf(buf, "%u\n", khugepaged_pages_to_scan);
226 static ssize_t pages_to_scan_store(struct kobject *kobj,
227 struct kobj_attribute *attr,
228 const char *buf, size_t count)
233 err = kstrtoul(buf, 10, &pages);
234 if (err || !pages || pages > UINT_MAX)
237 khugepaged_pages_to_scan = pages;
241 static struct kobj_attribute pages_to_scan_attr =
242 __ATTR(pages_to_scan, 0644, pages_to_scan_show,
243 pages_to_scan_store);
245 static ssize_t pages_collapsed_show(struct kobject *kobj,
246 struct kobj_attribute *attr,
249 return sprintf(buf, "%u\n", khugepaged_pages_collapsed);
251 static struct kobj_attribute pages_collapsed_attr =
252 __ATTR_RO(pages_collapsed);
254 static ssize_t full_scans_show(struct kobject *kobj,
255 struct kobj_attribute *attr,
258 return sprintf(buf, "%u\n", khugepaged_full_scans);
260 static struct kobj_attribute full_scans_attr =
261 __ATTR_RO(full_scans);
263 static ssize_t khugepaged_defrag_show(struct kobject *kobj,
264 struct kobj_attribute *attr, char *buf)
266 return single_hugepage_flag_show(kobj, attr, buf,
267 TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
269 static ssize_t khugepaged_defrag_store(struct kobject *kobj,
270 struct kobj_attribute *attr,
271 const char *buf, size_t count)
273 return single_hugepage_flag_store(kobj, attr, buf, count,
274 TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
276 static struct kobj_attribute khugepaged_defrag_attr =
277 __ATTR(defrag, 0644, khugepaged_defrag_show,
278 khugepaged_defrag_store);
281 * max_ptes_none controls if khugepaged should collapse hugepages over
282 * any unmapped ptes in turn potentially increasing the memory
283 * footprint of the vmas. When max_ptes_none is 0 khugepaged will not
284 * reduce the available free memory in the system as it
285 * runs. Increasing max_ptes_none will instead potentially reduce the
286 * free memory in the system during the khugepaged scan.
288 static ssize_t khugepaged_max_ptes_none_show(struct kobject *kobj,
289 struct kobj_attribute *attr,
292 return sprintf(buf, "%u\n", khugepaged_max_ptes_none);
294 static ssize_t khugepaged_max_ptes_none_store(struct kobject *kobj,
295 struct kobj_attribute *attr,
296 const char *buf, size_t count)
299 unsigned long max_ptes_none;
301 err = kstrtoul(buf, 10, &max_ptes_none);
302 if (err || max_ptes_none > HPAGE_PMD_NR-1)
305 khugepaged_max_ptes_none = max_ptes_none;
309 static struct kobj_attribute khugepaged_max_ptes_none_attr =
310 __ATTR(max_ptes_none, 0644, khugepaged_max_ptes_none_show,
311 khugepaged_max_ptes_none_store);
313 static ssize_t khugepaged_max_ptes_swap_show(struct kobject *kobj,
314 struct kobj_attribute *attr,
317 return sprintf(buf, "%u\n", khugepaged_max_ptes_swap);
320 static ssize_t khugepaged_max_ptes_swap_store(struct kobject *kobj,
321 struct kobj_attribute *attr,
322 const char *buf, size_t count)
325 unsigned long max_ptes_swap;
327 err = kstrtoul(buf, 10, &max_ptes_swap);
328 if (err || max_ptes_swap > HPAGE_PMD_NR-1)
331 khugepaged_max_ptes_swap = max_ptes_swap;
336 static struct kobj_attribute khugepaged_max_ptes_swap_attr =
337 __ATTR(max_ptes_swap, 0644, khugepaged_max_ptes_swap_show,
338 khugepaged_max_ptes_swap_store);
340 static ssize_t khugepaged_max_ptes_shared_show(struct kobject *kobj,
341 struct kobj_attribute *attr,
344 return sprintf(buf, "%u\n", khugepaged_max_ptes_shared);
347 static ssize_t khugepaged_max_ptes_shared_store(struct kobject *kobj,
348 struct kobj_attribute *attr,
349 const char *buf, size_t count)
352 unsigned long max_ptes_shared;
354 err = kstrtoul(buf, 10, &max_ptes_shared);
355 if (err || max_ptes_shared > HPAGE_PMD_NR-1)
358 khugepaged_max_ptes_shared = max_ptes_shared;
363 static struct kobj_attribute khugepaged_max_ptes_shared_attr =
364 __ATTR(max_ptes_shared, 0644, khugepaged_max_ptes_shared_show,
365 khugepaged_max_ptes_shared_store);
367 static struct attribute *khugepaged_attr[] = {
368 &khugepaged_defrag_attr.attr,
369 &khugepaged_max_ptes_none_attr.attr,
370 &khugepaged_max_ptes_swap_attr.attr,
371 &khugepaged_max_ptes_shared_attr.attr,
372 &pages_to_scan_attr.attr,
373 &pages_collapsed_attr.attr,
374 &full_scans_attr.attr,
375 &scan_sleep_millisecs_attr.attr,
376 &alloc_sleep_millisecs_attr.attr,
380 struct attribute_group khugepaged_attr_group = {
381 .attrs = khugepaged_attr,
382 .name = "khugepaged",
384 #endif /* CONFIG_SYSFS */
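/*
 * The attribute group above is named "khugepaged", so on a typical build
 * these knobs are exposed under
 * /sys/kernel/mm/transparent_hugepage/khugepaged/, e.g.:
 *
 *   echo 4096  > /sys/kernel/mm/transparent_hugepage/khugepaged/pages_to_scan
 *   echo 10000 > /sys/kernel/mm/transparent_hugepage/khugepaged/scan_sleep_millisecs
 *   cat /sys/kernel/mm/transparent_hugepage/khugepaged/pages_collapsed
 */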
386 int hugepage_madvise(struct vm_area_struct *vma,
387 unsigned long *vm_flags, int advice)
393 * qemu blindly sets MADV_HUGEPAGE on all allocations, but s390
394 * can't handle this properly after s390_enable_sie, so we simply
395 * ignore the madvise to prevent qemu from causing a SIGSEGV.
397 if (mm_has_pgste(vma->vm_mm))
400 *vm_flags &= ~VM_NOHUGEPAGE;
401 *vm_flags |= VM_HUGEPAGE;
403 * If the vma becomes good for khugepaged to scan,
404 * register it here without waiting for a page fault that
405 * may not happen any time soon.
407 if (!(*vm_flags & VM_NO_KHUGEPAGED) &&
408 khugepaged_enter_vma_merge(vma, *vm_flags))
411 case MADV_NOHUGEPAGE:
412 *vm_flags &= ~VM_HUGEPAGE;
413 *vm_flags |= VM_NOHUGEPAGE;
415 * Setting VM_NOHUGEPAGE will prevent khugepaged from scanning
416 * this vma even if we leave the mm registered in khugepaged if
417 * it got registered before VM_NOHUGEPAGE was set.
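/*
 * khugepaged_init() below sets the default tuning: scan 8 * HPAGE_PMD_NR
 * ptes per pass, and allow up to HPAGE_PMD_NR - 1 empty ptes,
 * HPAGE_PMD_NR / 8 swapped ptes and HPAGE_PMD_NR / 2 shared ptes per
 * candidate region, with matching HPAGE_CONT_PTE_NR-based limits for the
 * 64KB case when CONFIG_FINEGRAINED_THP is enabled.
 */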
425 int __init khugepaged_init(void)
427 mm_slot_cache = kmem_cache_create("khugepaged_mm_slot",
428 sizeof(struct mm_slot),
429 __alignof__(struct mm_slot), 0, NULL);
433 khugepaged_pages_to_scan = HPAGE_PMD_NR * 8;
434 khugepaged_max_ptes_none = HPAGE_PMD_NR - 1;
435 khugepaged_max_ptes_swap = HPAGE_PMD_NR / 8;
436 khugepaged_max_ptes_shared = HPAGE_PMD_NR / 2;
438 #ifdef CONFIG_FINEGRAINED_THP
439 khugepaged_max_ptes_none_64kb = HPAGE_CONT_PTE_NR - 1;
440 khugepaged_max_ptes_swap_64kb = HPAGE_CONT_PTE_NR / 8;
441 khugepaged_max_ptes_shared_64kb = HPAGE_CONT_PTE_NR / 2;
446 void __init khugepaged_destroy(void)
448 kmem_cache_destroy(mm_slot_cache);
451 static inline struct mm_slot *alloc_mm_slot(void)
453 if (!mm_slot_cache) /* initialization failed */
455 return kmem_cache_zalloc(mm_slot_cache, GFP_KERNEL);
458 static inline void free_mm_slot(struct mm_slot *mm_slot)
460 kmem_cache_free(mm_slot_cache, mm_slot);
463 static struct mm_slot *get_mm_slot(struct mm_struct *mm)
465 struct mm_slot *mm_slot;
467 hash_for_each_possible(mm_slots_hash, mm_slot, hash, (unsigned long)mm)
468 if (mm == mm_slot->mm)
474 static void insert_to_mm_slots_hash(struct mm_struct *mm,
475 struct mm_slot *mm_slot)
478 hash_add(mm_slots_hash, &mm_slot->hash, (long)mm);
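/*
 * khugepaged_test_exit() reports whether the process has already dropped its
 * last mm_users reference; khugepaged uses it to avoid touching page tables
 * of an mm that is exiting while its mm_slot is still queued.
 */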
481 static inline int khugepaged_test_exit(struct mm_struct *mm)
483 return atomic_read(&mm->mm_users) == 0;
486 #ifdef CONFIG_FINEGRAINED_THP
487 static void clear_hint_list(struct mm_slot *slot);
488 #endif /* CONFIG_FINEGRAINED_THP */
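/*
 * hugepage_vma_check() decides whether khugepaged may work on a vma:
 * madvise/MMF_DISABLE_THP opt-outs are honoured first, then shmem and
 * (read-only) file mappings are accepted when suitably aligned, and
 * anonymous vmas are accepted unless VM_NO_KHUGEPAGED is set.
 */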
490 static bool hugepage_vma_check(struct vm_area_struct *vma,
491 unsigned long vm_flags)
493 /* Explicitly disabled through madvise. */
494 if ((vm_flags & VM_NOHUGEPAGE) ||
495 test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags))
498 /* Check whether an arch-dependent shmem hugepage is available */
499 if (arch_hugepage_vma_shmem_check(vma, vm_flags))
501 /* Enabled via shmem mount options or sysfs settings. */
502 else if (shmem_file(vma->vm_file) && shmem_huge_enabled(vma)) {
503 return IS_ALIGNED((vma->vm_start >> PAGE_SHIFT) - vma->vm_pgoff,
507 /* THP settings require madvise. */
508 if (!(vm_flags & VM_HUGEPAGE) && !khugepaged_always())
511 /* Check whether an arch-dependent file hugepage is available */
512 if (arch_hugepage_vma_file_check(vma, vm_flags))
514 /* Read-only file mappings need to be aligned for THP to work. */
515 else if (IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS) && vma->vm_file &&
516 (vm_flags & VM_DENYWRITE)) {
517 return IS_ALIGNED((vma->vm_start >> PAGE_SHIFT) - vma->vm_pgoff,
521 if (!vma->anon_vma || vma->vm_ops)
523 if (vma_is_temporary_stack(vma))
525 return !(vm_flags & VM_NO_KHUGEPAGED);
528 int __khugepaged_enter(struct mm_struct *mm)
530 struct mm_slot *mm_slot;
533 mm_slot = alloc_mm_slot();
537 /* __khugepaged_exit() must not run from under us */
538 VM_BUG_ON_MM(atomic_read(&mm->mm_users) == 0, mm);
539 if (unlikely(test_and_set_bit(MMF_VM_HUGEPAGE, &mm->flags))) {
540 free_mm_slot(mm_slot);
544 spin_lock(&khugepaged_mm_lock);
545 insert_to_mm_slots_hash(mm, mm_slot);
547 * Insert just behind the scanning cursor, to let the area settle down.
550 wakeup = list_empty(&khugepaged_scan.mm_head);
551 list_add_tail(&mm_slot->mm_node, &khugepaged_scan.mm_head);
552 spin_unlock(&khugepaged_mm_lock);
556 wake_up_interruptible(&khugepaged_wait);
561 int khugepaged_enter_vma_merge(struct vm_area_struct *vma,
562 unsigned long vm_flags)
564 unsigned long hstart, hend;
567 * khugepaged only supports read-only files for non-shmem files.
568 * khugepaged does not yet work on special mappings. And
569 * file-private shmem THP is not supported.
571 if (!hugepage_vma_check(vma, vm_flags))
574 hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
575 hend = vma->vm_end & HPAGE_PMD_MASK;
577 return khugepaged_enter(vma, vm_flags);
578 #ifdef CONFIG_FINEGRAINED_THP
579 hstart = (vma->vm_start + ~HPAGE_CONT_PTE_MASK) & HPAGE_CONT_PTE_MASK;
580 hend = vma->vm_end & HPAGE_CONT_PTE_MASK;
582 return khugepaged_enter(vma, vm_flags);
583 #endif /* CONFIG_FINEGRAINED_THP */
587 void __khugepaged_exit(struct mm_struct *mm)
589 struct mm_slot *mm_slot;
592 spin_lock(&khugepaged_mm_lock);
593 mm_slot = get_mm_slot(mm);
594 if (mm_slot && khugepaged_scan.mm_slot != mm_slot) {
595 #ifdef CONFIG_FINEGRAINED_THP
596 clear_hint_list(mm_slot);
598 hash_del(&mm_slot->hash);
599 list_del(&mm_slot->mm_node);
602 spin_unlock(&khugepaged_mm_lock);
605 clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
606 free_mm_slot(mm_slot);
608 } else if (mm_slot) {
610 * This is required to serialize against
611 * khugepaged_test_exit() (which is guaranteed to run
612 * under mmap sem read mode). Stop here (after we
613 * return all pagetables will be destroyed) until
614 * khugepaged has finished working on the pagetables
615 * under the mmap_lock.
618 mmap_write_unlock(mm);
622 static void release_pte_page(struct page *page)
624 mod_node_page_state(page_pgdat(page),
625 NR_ISOLATED_ANON + page_is_file_lru(page),
628 putback_lru_page(page);
631 static void release_pte_pages(pte_t *pte, pte_t *_pte,
632 struct list_head *compound_pagelist)
634 struct page *page, *tmp;
636 while (--_pte >= pte) {
637 pte_t pteval = *_pte;
639 page = pte_page(pteval);
640 if (!pte_none(pteval) && !is_zero_pfn(pte_pfn(pteval)) &&
642 release_pte_page(page);
645 list_for_each_entry_safe(page, tmp, compound_pagelist, lru) {
646 list_del(&page->lru);
647 release_pte_page(page);
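/*
 * A page counts as pin-free when its refcount matches its total mapcount
 * (plus one reference per subpage while it sits in the swap cache).  Any
 * surplus reference means someone else, e.g. GUP, holds a pin, so collapsing
 * the page would be unsafe.
 */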
651 static bool is_refcount_suitable(struct page *page)
653 int expected_refcount;
655 expected_refcount = total_mapcount(page);
656 if (PageSwapCache(page))
657 expected_refcount += compound_nr(page);
659 return page_count(page) == expected_refcount;
662 #ifdef CONFIG_FINEGRAINED_THP
663 static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
664 unsigned long address,
666 struct list_head *compound_pagelist,
668 #else /* CONFIG_FINEGRAINED_THP */
669 static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
670 unsigned long address,
672 struct list_head *compound_pagelist)
673 #endif /* CONFIG_FINEGRAINED_THP */
675 struct page *page = NULL;
677 int none_or_zero = 0, shared = 0, result = 0, referenced = 0;
678 bool writable = false;
679 #ifdef CONFIG_FINEGRAINED_THP
680 int max_ptes_shared, max_ptes_none;
683 if (hpage_type == THP_TYPE_64KB) {
684 hpage_nr = HPAGE_CONT_PTE_NR;
685 max_ptes_shared = khugepaged_max_ptes_shared_64kb;
686 max_ptes_none = khugepaged_max_ptes_none_64kb;
688 hpage_nr = HPAGE_PMD_NR;
689 max_ptes_shared = khugepaged_max_ptes_shared;
690 max_ptes_none = khugepaged_max_ptes_none;
692 #endif /* CONFIG_FINEGRAINED_THP */
695 #ifdef CONFIG_FINEGRAINED_THP
696 _pte < pte + hpage_nr;
698 _pte < pte+HPAGE_PMD_NR;
700 _pte++, address += PAGE_SIZE) {
701 pte_t pteval = *_pte;
702 if (pte_none(pteval) || (pte_present(pteval) &&
703 is_zero_pfn(pte_pfn(pteval)))) {
704 #ifdef CONFIG_FINEGRAINED_THP
705 if (!userfaultfd_armed(vma) &&
706 ++none_or_zero <= max_ptes_none)
707 #else /* CONFIG_FINEGRAINED_THP */
708 if (!userfaultfd_armed(vma) &&
709 ++none_or_zero <= khugepaged_max_ptes_none)
710 #endif /* CONFIG_FINEGRAINED_THP */
714 result = SCAN_EXCEED_NONE_PTE;
718 if (!pte_present(pteval)) {
719 result = SCAN_PTE_NON_PRESENT;
722 page = vm_normal_page(vma, address, pteval);
723 if (unlikely(!page)) {
724 result = SCAN_PAGE_NULL;
728 VM_BUG_ON_PAGE(!PageAnon(page), page);
730 #ifdef CONFIG_FINEGRAINED_THP
731 if (page_mapcount(page) > 1 &&
732 ++shared > max_ptes_shared)
733 #else /* CONFIG_FINEGRAINED_THP */
734 if (page_mapcount(page) > 1 &&
735 ++shared > khugepaged_max_ptes_shared)
736 #endif /* CONFIG_FINEGRAINED_THP */
738 result = SCAN_EXCEED_SHARED_PTE;
742 if (PageCompound(page)) {
744 page = compound_head(page);
747 * Check if we have dealt with the compound page
750 list_for_each_entry(p, compound_pagelist, lru) {
757 * We can do it before isolate_lru_page because the
758 * page can't be freed from under us. NOTE: PG_lock
759 * is needed to serialize against split_huge_page
760 * when invoked from the VM.
762 if (!trylock_page(page)) {
763 result = SCAN_PAGE_LOCK;
768 * Check if the page has any GUP (or other external) pins.
770 * The page table that maps the page has already been unlinked
771 * from the page table tree and this process cannot get
772 * an additional pin on the page.
774 * New pins can come later if the page is shared across fork,
775 * but not from this process. The other process cannot write to
776 * the page, only trigger CoW.
778 if (!is_refcount_suitable(page)) {
780 result = SCAN_PAGE_COUNT;
783 if (!pte_write(pteval) && PageSwapCache(page) &&
784 !reuse_swap_page(page, NULL)) {
786 * Page is in the swap cache and cannot be re-used.
787 * It cannot be collapsed into a THP.
790 result = SCAN_SWAP_CACHE_PAGE;
795 * Isolate the page to avoid collapsing a hugepage
796 * currently in use by the VM.
798 if (isolate_lru_page(page)) {
800 result = SCAN_DEL_PAGE_LRU;
803 mod_node_page_state(page_pgdat(page),
804 NR_ISOLATED_ANON + page_is_file_lru(page),
806 VM_BUG_ON_PAGE(!PageLocked(page), page);
807 VM_BUG_ON_PAGE(PageLRU(page), page);
809 if (PageCompound(page))
810 list_add_tail(&page->lru, compound_pagelist);
812 /* There should be enough young ptes to collapse the page */
813 if (pte_young(pteval) ||
814 page_is_young(page) || PageReferenced(page) ||
815 mmu_notifier_test_young(vma->vm_mm, address))
818 if (pte_write(pteval))
821 if (likely(writable)) {
822 if (likely(referenced)) {
823 result = SCAN_SUCCEED;
824 trace_mm_collapse_huge_page_isolate(page, none_or_zero,
825 referenced, writable, result);
829 result = SCAN_PAGE_RO;
833 release_pte_pages(pte, _pte, compound_pagelist);
834 trace_mm_collapse_huge_page_isolate(page, none_or_zero,
835 referenced, writable, result);
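/*
 * __collapse_huge_page_copy() copies each source page into the new huge page
 * (zero-filling slots whose pte was none or the zero pfn), clears the old
 * ptes, drops the rmap and LRU isolation taken by
 * __collapse_huge_page_isolate(), and finally releases any compound source
 * pages left on compound_pagelist.
 */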
839 #ifdef CONFIG_FINEGRAINED_THP
840 static void __collapse_huge_page_copy(pte_t *pte, struct page *page,
841 struct vm_area_struct *vma,
842 unsigned long address,
844 struct list_head *compound_pagelist,
846 #else /* CONFIG_FINEGRAINED_THP */
847 static void __collapse_huge_page_copy(pte_t *pte, struct page *page,
848 struct vm_area_struct *vma,
849 unsigned long address,
851 struct list_head *compound_pagelist)
852 #endif /* CONFIG_FINEGRAINED_THP */
854 struct page *src_page, *tmp;
856 #ifdef CONFIG_FINEGRAINED_THP
857 int hpage_nr = (hpage_type == THP_TYPE_64KB ?
858 HPAGE_CONT_PTE_NR : HPAGE_PMD_NR);
862 #ifdef CONFIG_FINEGRAINED_THP
863 _pte < pte + hpage_nr;
865 _pte < pte + HPAGE_PMD_NR;
867 _pte++, page++, address += PAGE_SIZE) {
868 pte_t pteval = *_pte;
870 if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) {
871 clear_user_highpage(page, address);
872 add_mm_counter(vma->vm_mm, MM_ANONPAGES, 1);
873 if (is_zero_pfn(pte_pfn(pteval))) {
875 * ptl mostly unnecessary.
879 * paravirt calls inside pte_clear here are
882 pte_clear(vma->vm_mm, address, _pte);
886 src_page = pte_page(pteval);
887 copy_user_highpage(page, src_page, address, vma);
888 if (!PageCompound(src_page))
889 release_pte_page(src_page);
891 * ptl mostly unnecessary, but preempt has to
892 * be disabled to update the per-cpu stats
893 * inside page_remove_rmap().
897 * paravirt calls inside pte_clear here are
900 pte_clear(vma->vm_mm, address, _pte);
901 page_remove_rmap(src_page, false);
903 free_page_and_swap_cache(src_page);
907 list_for_each_entry_safe(src_page, tmp, compound_pagelist, lru) {
908 list_del(&src_page->lru);
909 release_pte_page(src_page);
913 static void khugepaged_alloc_sleep(void)
917 add_wait_queue(&khugepaged_wait, &wait);
918 freezable_schedule_timeout_interruptible(
919 msecs_to_jiffies(khugepaged_alloc_sleep_millisecs));
920 remove_wait_queue(&khugepaged_wait, &wait);
923 static int khugepaged_node_load[MAX_NUMNODES];
925 static bool khugepaged_scan_abort(int nid)
930 * If node_reclaim_mode is disabled, then no extra effort is made to
931 * allocate memory locally.
933 if (!node_reclaim_mode)
936 /* If there is a count for this node already, it must be acceptable */
937 if (khugepaged_node_load[nid])
940 for (i = 0; i < MAX_NUMNODES; i++) {
941 if (!khugepaged_node_load[i])
943 if (node_distance(nid, i) > node_reclaim_distance)
949 /* Defrag for khugepaged will enter direct reclaim/compaction if necessary */
950 static inline gfp_t alloc_hugepage_khugepaged_gfpmask(void)
952 return khugepaged_defrag() ? GFP_TRANSHUGE : GFP_TRANSHUGE_LIGHT;
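/*
 * khugepaged_node_load[] counts, for the region currently being scanned, how
 * many of the existing pages live on each NUMA node.
 * khugepaged_find_target_node() then allocates the huge page on the node
 * with the most hits, round-robining between nodes with equal counts.
 */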
956 static int khugepaged_find_target_node(void)
958 static int last_khugepaged_target_node = NUMA_NO_NODE;
959 int nid, target_node = 0, max_value = 0;
961 /* find first node with max normal pages hit */
962 for (nid = 0; nid < MAX_NUMNODES; nid++)
963 if (khugepaged_node_load[nid] > max_value) {
964 max_value = khugepaged_node_load[nid];
968 /* do some balancing if several nodes have the same hit record */
969 if (target_node <= last_khugepaged_target_node)
970 for (nid = last_khugepaged_target_node + 1; nid < MAX_NUMNODES;
972 if (max_value == khugepaged_node_load[nid]) {
977 last_khugepaged_target_node = target_node;
981 static bool khugepaged_prealloc_page(struct page **hpage, bool *wait)
983 if (IS_ERR(*hpage)) {
989 khugepaged_alloc_sleep();
999 khugepaged_alloc_page(struct page **hpage, gfp_t gfp, int node)
1001 VM_BUG_ON_PAGE(*hpage, *hpage);
1003 *hpage = __alloc_pages_node(node, gfp, HPAGE_PMD_ORDER);
1004 if (unlikely(!*hpage)) {
1005 count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
1006 *hpage = ERR_PTR(-ENOMEM);
1010 prep_transhuge_page(*hpage);
1011 count_vm_event(THP_COLLAPSE_ALLOC);
1015 static int khugepaged_find_target_node(void)
1020 #ifdef CONFIG_FINEGRAINED_THP
1021 static inline struct page *alloc_khugepaged_hugepage(int hpage_order)
1023 static inline struct page *alloc_khugepaged_hugepage(void)
1028 #ifdef CONFIG_FINEGRAINED_THP
1029 page = alloc_pages(alloc_hugepage_khugepaged_gfpmask(),
1032 page = alloc_pages(alloc_hugepage_khugepaged_gfpmask(),
1036 prep_transhuge_page(page);
1040 static struct page *khugepaged_alloc_hugepage(bool *wait)
1045 #ifdef CONFIG_FINEGRAINED_THP
1046 hpage = alloc_khugepaged_hugepage(HPAGE_PMD_ORDER);
1048 hpage = alloc_khugepaged_hugepage();
1051 count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
1056 khugepaged_alloc_sleep();
1058 count_vm_event(THP_COLLAPSE_ALLOC);
1059 } while (unlikely(!hpage) && likely(khugepaged_enabled()));
1064 static bool khugepaged_prealloc_page(struct page **hpage, bool *wait)
1067 * If the hpage allocated earlier was briefly exposed in page cache
1068 * before collapse_file() failed, it is possible that racing lookups
1069 * have not yet completed, and would then be unpleasantly surprised by
1070 * finding the hpage reused for the same mapping at a different offset.
1071 * Just release the previous allocation if there is any danger of that.
1073 if (*hpage && page_count(*hpage) > 1) {
1079 *hpage = khugepaged_alloc_hugepage(wait);
1081 if (unlikely(!*hpage))
1087 #ifdef CONFIG_FINEGRAINED_THP
1088 static struct page *
1089 khugepaged_alloc_page(struct page **hpage, gfp_t gfp, int node, int hpage_type)
1093 if (hpage_type == THP_TYPE_64KB)
1094 page = alloc_khugepaged_hugepage(HPAGE_CONT_PTE_ORDER);
1101 #else /* CONFIG_FINEGRAINED_THP */
1102 static struct page *
1103 khugepaged_alloc_page(struct page **hpage, gfp_t gfp, int node)
1109 #endif /* CONFIG_FINEGRAINED_THP */
1113 * If the mmap_lock was temporarily dropped, revalidate the vma
1114 * before taking the mmap_lock again.
1115 * Returns 0 on success, otherwise returns a non-zero
1116 * value (scan code).
1119 #ifdef CONFIG_FINEGRAINED_THP
1120 static int hugepage_vma_revalidate(struct mm_struct *mm, unsigned long address,
1121 struct vm_area_struct **vmap, int hpage_type)
1123 static int hugepage_vma_revalidate(struct mm_struct *mm, unsigned long address,
1124 struct vm_area_struct **vmap)
1127 struct vm_area_struct *vma;
1128 unsigned long hstart, hend;
1130 if (unlikely(khugepaged_test_exit(mm)))
1131 return SCAN_ANY_PROCESS;
1133 *vmap = vma = find_vma(mm, address);
1135 return SCAN_VMA_NULL;
1137 #ifdef CONFIG_FINEGRAINED_THP
1138 if (hpage_type == THP_TYPE_64KB) {
1139 hstart = (vma->vm_start + ~HPAGE_CONT_PTE_MASK) & HPAGE_CONT_PTE_MASK;
1140 hend = vma->vm_end & HPAGE_CONT_PTE_MASK;
1141 if (address < hstart || address + HPAGE_CONT_PTE_SIZE > hend)
1142 return SCAN_ADDRESS_RANGE;
1143 if (!hugepage_vma_check(vma, vma->vm_flags))
1144 return SCAN_VMA_CHECK;
1147 #endif /* CONFIG_FINEGRAINED_THP */
1148 hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
1149 hend = vma->vm_end & HPAGE_PMD_MASK;
1150 if (address < hstart || address + HPAGE_PMD_SIZE > hend)
1151 return SCAN_ADDRESS_RANGE;
1152 if (!hugepage_vma_check(vma, vma->vm_flags))
1153 return SCAN_VMA_CHECK;
1154 /* Anon VMA expected */
1155 if (!vma->anon_vma || vma->vm_ops)
1156 return SCAN_VMA_CHECK;
1161 * Bring missing pages in from swap, to complete THP collapse.
1162 * Only done if khugepaged_scan_pmd believes it is worthwhile.
1164 * Called and returns without pte mapped or spinlocks held,
1165 * but with mmap_lock held to protect against vma changes.
1168 #ifdef CONFIG_FINEGRAINED_THP
1169 static bool __collapse_huge_page_swapin(struct mm_struct *mm,
1170 struct vm_area_struct *vma,
1171 unsigned long address, pmd_t *pmd,
1172 int referenced, int hpage_type)
1173 #else /* CONFIG_FINEGRAINED_THP */
1174 static bool __collapse_huge_page_swapin(struct mm_struct *mm,
1175 struct vm_area_struct *vma,
1176 unsigned long address, pmd_t *pmd,
1178 #endif /* CONFIG_FINEGRAINED_THP */
1182 struct vm_fault vmf = {
1185 .flags = FAULT_FLAG_ALLOW_RETRY,
1187 .pgoff = linear_page_index(vma, address),
1189 #ifdef CONFIG_FINEGRAINED_THP
1190 int hpage_size = (hpage_type == THP_TYPE_64KB) ?
1191 HPAGE_CONT_PTE_SIZE : HPAGE_PMD_SIZE;
1194 vmf.pte = pte_offset_map(pmd, address);
1196 #ifdef CONFIG_FINEGRAINED_THP
1197 vmf.address < address + hpage_size;
1199 vmf.address < address + HPAGE_PMD_NR*PAGE_SIZE;
1201 vmf.pte++, vmf.address += PAGE_SIZE) {
1202 vmf.orig_pte = *vmf.pte;
1203 if (!is_swap_pte(vmf.orig_pte))
1206 ret = do_swap_page(&vmf);
1208 /* do_swap_page returns VM_FAULT_RETRY with released mmap_lock */
1209 if (ret & VM_FAULT_RETRY) {
1211 #ifdef CONFIG_FINEGRAINED_THP
1212 if (hugepage_vma_revalidate(mm, address, &vmf.vma, hpage_type))
1214 if (hugepage_vma_revalidate(mm, address, &vmf.vma))
1217 /* vma is no longer available, don't continue to swapin */
1218 trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
1221 /* check if the pmd is still valid */
1222 if (mm_find_pmd(mm, address) != pmd) {
1223 trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
1227 if (ret & VM_FAULT_ERROR) {
1228 trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
1231 /* pte is unmapped now, we need to map it */
1232 vmf.pte = pte_offset_map(pmd, vmf.address);
1237 /* Drain LRU add pagevec to remove extra pin on the swapped in pages */
1241 trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 1);
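/*
 * collapse_huge_page() is entered with the mmap_lock held for read and
 * returns with it released in all cases: it drops the read lock to allocate
 * the new huge page, revalidates the vma and pmd, optionally swaps missing
 * pages back in, then takes the mmap write lock plus the anon_vma lock,
 * flushes the old mapping (pmdp_collapse_flush, or a TLB range flush for the
 * 64KB case), isolates and copies the small pages, and installs the huge pmd
 * (or contiguous ptes under CONFIG_FINEGRAINED_THP).
 */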
1245 #ifdef CONFIG_FINEGRAINED_THP
1246 static void collapse_huge_page(struct mm_struct *mm,
1247 unsigned long address,
1248 struct page **hpage,
1249 int node, int referenced, int unmapped,
1251 #else /* CONFIG_FINEGRAINED_THP */
1252 static void collapse_huge_page(struct mm_struct *mm,
1253 unsigned long address,
1254 struct page **hpage,
1255 int node, int referenced, int unmapped)
1256 #endif /* CONFIG_FINEGRAINED_THP */
1258 LIST_HEAD(compound_pagelist);
1262 struct page *new_page;
1263 spinlock_t *pmd_ptl, *pte_ptl;
1264 int isolated = 0, result = 0;
1265 struct vm_area_struct *vma;
1266 struct mmu_notifier_range range;
1269 #ifdef CONFIG_FINEGRAINED_THP
1272 VM_BUG_ON(address & (hpage_type == THP_TYPE_64KB ?
1273 ~HPAGE_CONT_PTE_MASK : ~HPAGE_PMD_MASK));
1275 VM_BUG_ON(address & ~HPAGE_PMD_MASK);
1278 /* Only allocate from the target node */
1279 gfp = alloc_hugepage_khugepaged_gfpmask() | __GFP_THISNODE;
1282 * Before allocating the hugepage, release the mmap_lock read lock.
1283 * The allocation can take potentially a long time if it involves
1284 * sync compaction, and we do not need to hold the mmap_lock during
1285 * that. We will recheck the vma after taking it again in write mode.
1287 mmap_read_unlock(mm);
1288 #ifdef CONFIG_FINEGRAINED_THP
1289 new_page = khugepaged_alloc_page(hpage, gfp, node, hpage_type);
1291 new_page = khugepaged_alloc_page(hpage, gfp, node);
1294 result = SCAN_ALLOC_HUGE_PAGE_FAIL;
1298 if (unlikely(mem_cgroup_charge(new_page, mm, gfp))) {
1299 result = SCAN_CGROUP_CHARGE_FAIL;
1302 count_memcg_page_event(new_page, THP_COLLAPSE_ALLOC);
1305 #ifdef CONFIG_FINEGRAINED_THP
1306 result = hugepage_vma_revalidate(mm, address, &vma, hpage_type);
1308 result = hugepage_vma_revalidate(mm, address, &vma);
1311 mmap_read_unlock(mm);
1315 pmd = mm_find_pmd(mm, address);
1317 result = SCAN_PMD_NULL;
1318 mmap_read_unlock(mm);
1323 * __collapse_huge_page_swapin always returns with mmap_lock locked.
1324 * If it fails, we release mmap_lock and jump out_nolock.
1325 * Continuing to collapse causes inconsistency.
1327 #ifdef CONFIG_FINEGRAINED_THP
1328 if (unmapped && !__collapse_huge_page_swapin(mm, vma, address,
1329 pmd, referenced, hpage_type)) {
1330 mmap_read_unlock(mm);
1333 #else /* CONFIG_FINEGRAINED_THP */
1334 if (unmapped && !__collapse_huge_page_swapin(mm, vma, address,
1336 mmap_read_unlock(mm);
1339 #endif /* CONFIG_FINEGRAINED_THP*/
1341 mmap_read_unlock(mm);
1343 * Prevent all access to pagetables with the exception of
1344 * gup_fast later handled by the ptep_clear_flush and the VM
1345 * handled by the anon_vma lock + PG_lock.
1347 mmap_write_lock(mm);
1348 #ifdef CONFIG_FINEGRAINED_THP
1349 result = hugepage_vma_revalidate(mm, address, &vma, hpage_type);
1351 result = hugepage_vma_revalidate(mm, address, &vma);
1355 /* check if the pmd is still valid */
1356 if (mm_find_pmd(mm, address) != pmd)
1359 anon_vma_lock_write(vma->anon_vma);
1361 #ifdef CONFIG_FINEGRAINED_THP
1362 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, NULL, mm,
1363 address, address + (hpage_type == THP_TYPE_64KB ?
1364 HPAGE_CONT_PTE_SIZE : HPAGE_PMD_SIZE));
1366 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, NULL, mm,
1367 address, address + HPAGE_PMD_SIZE);
1369 mmu_notifier_invalidate_range_start(&range);
1371 pte = pte_offset_map(pmd, address);
1372 pte_ptl = pte_lockptr(mm, pmd);
1374 pmd_ptl = pmd_lock(mm, pmd); /* probably unnecessary */
1376 * After this gup_fast can't run anymore. This also removes
1377 * any huge TLB entry from the CPU so we won't allow
1378 * huge and small TLB entries for the same virtual address
1379 * to avoid the risk of CPU bugs in that area.
1381 #ifdef CONFIG_FINEGRAINED_THP
1382 if (hpage_type == THP_TYPE_64KB)
1383 /* FIXME: clearing ptes here causes
1384 * __collapse_huge_page_isolate and __collapse_huge_page_copy
1385 * to fail, __collapse_huge_page_copy also clears ptes
1387 flush_tlb_range(vma, address, address + HPAGE_CONT_PTE_SIZE);
1389 #endif /* CONFIG_FINEGRAINED_THP */
1390 _pmd = pmdp_collapse_flush(vma, address, pmd);
1391 spin_unlock(pmd_ptl);
1392 mmu_notifier_invalidate_range_end(&range);
1395 #ifdef CONFIG_FINEGRAINED_THP
1396 isolated = __collapse_huge_page_isolate(vma, address, pte,
1397 &compound_pagelist, hpage_type);
1398 #else /* CONFIG_FINEGRAINED_THP */
1399 isolated = __collapse_huge_page_isolate(vma, address, pte,
1400 &compound_pagelist);
1401 #endif /* CONFIG_FINEGRAINED_THP */
1402 spin_unlock(pte_ptl);
1404 if (unlikely(!isolated)) {
1405 #ifdef CONFIG_FINEGRAINED_THP
1406 if (hpage_type == THP_TYPE_64KB) {
1408 anon_vma_unlock_write(vma->anon_vma);
1412 #endif /* CONFIG_FINEGRAINED_THP */
1415 BUG_ON(!pmd_none(*pmd));
1417 * We can only use set_pmd_at when establishing
1418 * hugepmds and never for establishing regular pmds that
1419 * point to regular pagetables. Use pmd_populate for that.
1421 pmd_populate(mm, pmd, pmd_pgtable(_pmd));
1422 spin_unlock(pmd_ptl);
1423 anon_vma_unlock_write(vma->anon_vma);
1429 * All pages are isolated and locked so anon_vma rmap
1430 * can't run anymore.
1432 anon_vma_unlock_write(vma->anon_vma);
1434 #ifdef CONFIG_FINEGRAINED_THP
1435 __collapse_huge_page_copy(pte, new_page, vma, address, pte_ptl,
1436 &compound_pagelist, hpage_type);
1437 #else /* CONFIG_FINEGRAINED_THP */
1438 __collapse_huge_page_copy(pte, new_page, vma, address, pte_ptl,
1439 &compound_pagelist);
1440 #endif /* CONFIG_FINEGRAINED_THP */
1442 __SetPageUptodate(new_page);
1444 #ifdef CONFIG_FINEGRAINED_THP
1445 if (hpage_type == THP_TYPE_64KB) {
1447 _pte = arch_make_huge_pte(new_page, vma);
1448 _pte = maybe_mkwrite(pte_mkdirty(_pte), vma);
1451 pgtable = pmd_pgtable(_pmd);
1453 _pmd = mk_huge_pmd(new_page, vma->vm_page_prot);
1454 _pmd = maybe_pmd_mkwrite(pmd_mkdirty(_pmd), vma);
1456 #else /* CONFIG_FINEGRAINED_THP */
1457 pgtable = pmd_pgtable(_pmd);
1459 _pmd = mk_huge_pmd(new_page, vma->vm_page_prot);
1460 _pmd = maybe_pmd_mkwrite(pmd_mkdirty(_pmd), vma);
1461 #endif /* CONFIG_FINEGRAINED_THP */
1463 * spin_lock() below is not the equivalent of smp_wmb(), so
1464 * this is needed to prevent the copy_huge_page writes from becoming
1465 * visible after the set_pmd_at() write.
1470 #ifdef CONFIG_FINEGRAINED_THP
1471 if (hpage_type == THP_TYPE_2MB)
1473 BUG_ON(!pmd_none(*pmd));
1474 page_add_new_anon_rmap(new_page, vma, address, true);
1475 lru_cache_add_inactive_or_unevictable(new_page, vma);
1477 #ifdef CONFIG_FINEGRAINED_THP
1478 if (hpage_type == THP_TYPE_64KB)
1479 arch_set_huge_pte_at(mm, address, pte, _pte, 0);
1481 pgtable_trans_huge_deposit(mm, pmd, pgtable);
1482 set_pmd_at(mm, address, pmd, _pmd);
1484 update_mmu_cache_pmd(vma, address, pmd);
1485 #else /* CONFIG_FINEGRAINED_THP */
1486 pgtable_trans_huge_deposit(mm, pmd, pgtable);
1487 set_pmd_at(mm, address, pmd, _pmd);
1488 update_mmu_cache_pmd(vma, address, pmd);
1489 #endif /* CONFIG_FINEGRAINED_THP */
1490 spin_unlock(pmd_ptl);
1492 #ifdef CONFIG_FINEGRAINED_THP
1493 if (hpage_type == THP_TYPE_2MB)
1497 khugepaged_pages_collapsed++;
1498 result = SCAN_SUCCEED;
1500 mmap_write_unlock(mm);
1502 if (!IS_ERR_OR_NULL(*hpage))
1503 mem_cgroup_uncharge(*hpage);
1504 #ifdef CONFIG_FINEGRAINED_THP
1505 if (result != SCAN_SUCCEED && new_page && hpage_type == THP_TYPE_64KB)
1508 trace_mm_collapse_huge_page(mm, isolated, result);
1514 #ifdef CONFIG_FINEGRAINED_THP
1515 static int khugepaged_scan_pmd(struct mm_struct *mm,
1516 struct vm_area_struct *vma,
1517 unsigned long address,
1518 struct page **hpage, int hpage_type)
1519 #else /* CONFIG_FINEGRAINED_THP */
1520 static int khugepaged_scan_pmd(struct mm_struct *mm,
1521 struct vm_area_struct *vma,
1522 unsigned long address,
1523 struct page **hpage)
1524 #endif /* CONFIG_FINEGRAINED_THP */
1528 int ret = 0, result = 0, referenced = 0;
1529 int none_or_zero = 0, shared = 0;
1530 struct page *page = NULL;
1531 unsigned long _address;
1533 int node = NUMA_NO_NODE, unmapped = 0;
1534 bool writable = false;
1536 #ifdef CONFIG_FINEGRAINED_THP
1538 int max_ptes_swap, max_ptes_none, max_ptes_shared;
1540 if (hpage_type == THP_TYPE_64KB) {
1541 VM_BUG_ON(address & ~HPAGE_CONT_PTE_MASK);
1542 hpage_nr = HPAGE_CONT_PTE_NR;
1543 max_ptes_swap = khugepaged_max_ptes_swap_64kb;
1544 max_ptes_none = khugepaged_max_ptes_none_64kb;
1545 max_ptes_shared = khugepaged_max_ptes_shared_64kb;
1547 VM_BUG_ON(address & ~HPAGE_PMD_MASK);
1548 hpage_nr = HPAGE_PMD_NR;
1549 max_ptes_swap = khugepaged_max_ptes_swap;
1550 max_ptes_none = khugepaged_max_ptes_none;
1551 max_ptes_shared = khugepaged_max_ptes_shared;
1553 #else /* CONFIG_FINEGRAINED_THP */
1554 VM_BUG_ON(address & ~HPAGE_PMD_MASK);
1555 #endif /* CONFIG_FINEGRAINED_THP */
1557 pmd = mm_find_pmd(mm, address);
1559 result = SCAN_PMD_NULL;
1563 memset(khugepaged_node_load, 0, sizeof(khugepaged_node_load));
1564 pte = pte_offset_map_lock(mm, pmd, address, &ptl);
1565 for (_address = address, _pte = pte;
1566 #ifdef CONFIG_FINEGRAINED_THP
1567 _pte < pte + hpage_nr;
1569 _pte < pte+HPAGE_PMD_NR;
1571 _pte++, _address += PAGE_SIZE) {
1572 pte_t pteval = *_pte;
1573 if (is_swap_pte(pteval)) {
1574 #ifdef CONFIG_FINEGRAINED_THP
1575 if (++unmapped <= max_ptes_swap)
1577 if (++unmapped <= khugepaged_max_ptes_swap)
1581 * Always be strict with uffd-wp
1582 * enabled swap entries. Please see
1583 * comment below for pte_uffd_wp().
1585 if (pte_swp_uffd_wp(pteval)) {
1586 result = SCAN_PTE_UFFD_WP;
1591 result = SCAN_EXCEED_SWAP_PTE;
1595 if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) {
1596 if (!userfaultfd_armed(vma) &&
1597 #ifdef CONFIG_FINEGRAINED_THP
1598 ++none_or_zero <= max_ptes_none
1600 ++none_or_zero <= khugepaged_max_ptes_none
1606 result = SCAN_EXCEED_NONE_PTE;
1610 if (!pte_present(pteval)) {
1611 result = SCAN_PTE_NON_PRESENT;
1614 if (pte_uffd_wp(pteval)) {
1616 * Don't collapse the page if any of the small
1617 * PTEs are armed with uffd write protection.
1618 * Here we could also mark the new huge pmd as
1619 * write protected if any of the small ones is
1620 * marked, but that could bring unknown
1621 * userfault messages that fall outside of
1622 * the registered range. So, just keep it simple.
1624 result = SCAN_PTE_UFFD_WP;
1627 if (pte_write(pteval))
1630 page = vm_normal_page(vma, _address, pteval);
1631 if (unlikely(!page)) {
1632 result = SCAN_PAGE_NULL;
1636 #ifdef CONFIG_FINEGRAINED_THP
1637 if (PageCompound(page) && PageTransHuge(compound_head(page))) {
1638 result = SCAN_PAGE_COMPOUND;
1642 if (page_mapcount(page) > 1 &&
1643 ++shared > max_ptes_shared)
1645 if (page_mapcount(page) > 1 &&
1646 ++shared > khugepaged_max_ptes_shared)
1649 result = SCAN_EXCEED_SHARED_PTE;
1653 page = compound_head(page);
1656 * Record which node the original page is from and save this
1657 * information to khugepaged_node_load[].
1658 * Khugepaged will allocate a hugepage from the node that has the most hits.
1661 node = page_to_nid(page);
1662 if (khugepaged_scan_abort(node)) {
1663 result = SCAN_SCAN_ABORT;
1666 khugepaged_node_load[node]++;
1667 if (!PageLRU(page)) {
1668 result = SCAN_PAGE_LRU;
1671 if (PageLocked(page)) {
1672 result = SCAN_PAGE_LOCK;
1675 if (!PageAnon(page)) {
1676 result = SCAN_PAGE_ANON;
1681 * Check if the page has any GUP (or other external) pins.
1683 * Here the check is racy: it may see total_mapcount > refcount in some cases.
1685 * For example, one process with one forked child process.
1686 * The parent has the PMD split due to MADV_DONTNEED, then
1687 * the child is trying to unmap the whole PMD, but khugepaged
1688 * may be scanning the parent between when the child has the
1689 * PageDoubleMap flag cleared and when it decrements the mapcount. So
1690 * khugepaged may see total_mapcount > refcount.
1692 * But such a case is ephemeral; we could always retry collapse
1693 * later. However, it may report a false positive if the page
1694 * has excessive GUP pins (i.e. 512). Anyway, the same check
1695 * will be done again later, so the risk seems low.
1697 if (!is_refcount_suitable(page)) {
1698 result = SCAN_PAGE_COUNT;
1701 if (pte_young(pteval) ||
1702 page_is_young(page) || PageReferenced(page) ||
1703 mmu_notifier_test_young(vma->vm_mm, address))
1707 result = SCAN_PAGE_RO;
1708 } else if (!referenced || (unmapped && referenced < HPAGE_PMD_NR/2)) {
1709 result = SCAN_LACK_REFERENCED_PAGE;
1711 result = SCAN_SUCCEED;
1715 pte_unmap_unlock(pte, ptl);
1717 node = khugepaged_find_target_node();
1718 /* collapse_huge_page will return with the mmap_lock released */
1719 #ifdef CONFIG_FINEGRAINED_THP
1720 collapse_huge_page(mm, address, hpage, node,
1721 referenced, unmapped, hpage_type);
1723 collapse_huge_page(mm, address, hpage, node,
1724 referenced, unmapped);
1728 trace_mm_khugepaged_scan_pmd(mm, page, writable, referenced,
1729 none_or_zero, result, unmapped);
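/*
 * collect_mm_slot() is called with khugepaged_mm_lock held; once the owner
 * of the mm has exited it unhashes the slot, removes it from the scan list
 * and frees it (clearing MMF_VM_HUGEPAGE is not needed because the mm is
 * already gone).
 */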
1733 static void collect_mm_slot(struct mm_slot *mm_slot)
1735 struct mm_struct *mm = mm_slot->mm;
1737 lockdep_assert_held(&khugepaged_mm_lock);
1739 if (khugepaged_test_exit(mm)) {
1740 #ifdef CONFIG_FINEGRAINED_THP
1741 clear_hint_list(mm_slot);
1744 hash_del(&mm_slot->hash);
1745 list_del(&mm_slot->mm_node);
1748 * Not strictly needed because the mm exited already.
1750 * clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
1753 /* khugepaged_mm_lock actually not necessary for the below */
1754 free_mm_slot(mm_slot);
1761 * Notify khugepaged that the given addr of the mm is a pte-mapped THP. Then
1762 * khugepaged should try to collapse the page table.
1764 #ifdef CONFIG_FINEGRAINED_THP
1765 static int khugepaged_add_pte_mapped_thp(struct mm_struct *mm,
1766 unsigned long addr, int hpage_type)
1768 static int khugepaged_add_pte_mapped_thp(struct mm_struct *mm,
1772 struct mm_slot *mm_slot;
1774 #ifdef CONFIG_FINEGRAINED_THP
1775 VM_BUG_ON(addr & (hpage_type == THP_TYPE_64KB ?
1776 ~HPAGE_CONT_PTE_MASK :~HPAGE_PMD_MASK));
1778 VM_BUG_ON(addr & ~HPAGE_PMD_MASK);
1781 spin_lock(&khugepaged_mm_lock);
1782 mm_slot = get_mm_slot(mm);
1783 #ifdef CONFIG_FINEGRAINED_THP
1784 if (hpage_type == THP_TYPE_64KB)
1787 if (likely(mm_slot && mm_slot->nr_pte_mapped_thp < MAX_PTE_MAPPED_THP))
1788 mm_slot->pte_mapped_thp[mm_slot->nr_pte_mapped_thp++] = addr;
1789 spin_unlock(&khugepaged_mm_lock);
1794 * Try to collapse a pte-mapped THP for mm at address haddr.
1796 * This function checks whether all the PTEs in the PMD are pointing to the
1797 * right THP. If so, retract the page table so the THP can refault in as a huge page.
1800 void collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr)
1802 unsigned long haddr = addr & HPAGE_PMD_MASK;
1803 struct vm_area_struct *vma = find_vma(mm, haddr);
1805 pte_t *start_pte, *pte;
1810 #ifdef CONFIG_FINEGRAINED_THP
1811 int hpage_type = (addr & 0x01) ? THP_TYPE_64KB : THP_TYPE_2MB;
1812 int hpage_nr = (hpage_type == THP_TYPE_64KB) ?
1813 HPAGE_CONT_PTE_NR : HPAGE_PMD_NR;
1814 int hpage_size = (hpage_type == THP_TYPE_64KB) ?
1815 HPAGE_CONT_PTE_SIZE : HPAGE_PMD_SIZE;
1817 if (hpage_type == THP_TYPE_64KB)
1818 haddr = addr & HPAGE_CONT_PTE_MASK;
1821 #ifdef CONFIG_FINEGRAINED_THP
1822 if (!vma || !vma->vm_file ||
1823 vma->vm_start > haddr || vma->vm_end < haddr + hpage_size)
1825 #else /* CONFIG_FINEGRAINED_THP */
1826 if (!vma || !vma->vm_file ||
1827 vma->vm_start > haddr || vma->vm_end < haddr + HPAGE_PMD_SIZE)
1829 #endif /* CONFIG_FINEGRAINED_THP */
1832 * This vm_flags may not have VM_HUGEPAGE if the page was not
1833 * collapsed by this mm. But we can still collapse if the page is
1834 * a valid THP. Add an extra VM_HUGEPAGE so hugepage_vma_check()
1835 * will not fail the vma for missing VM_HUGEPAGE.
1837 if (!hugepage_vma_check(vma, vma->vm_flags | VM_HUGEPAGE))
1840 hpage = find_lock_page(vma->vm_file->f_mapping,
1841 linear_page_index(vma, haddr));
1845 if (!PageHead(hpage))
1848 pmd = mm_find_pmd(mm, haddr);
1852 start_pte = pte_offset_map_lock(mm, pmd, haddr, &ptl);
1853 #ifdef CONFIG_FINEGRAINED_THP
1854 if (pte_cont(*start_pte)) {
1855 pte_unmap_unlock(start_pte, ptl);
1860 /* step 1: check all mapped PTEs are to the right huge page */
1861 for (i = 0, addr = haddr, pte = start_pte;
1862 #ifdef CONFIG_FINEGRAINED_THP
1867 i++, addr += PAGE_SIZE, pte++) {
1870 /* empty pte, skip */
1874 /* page swapped out, abort */
1875 if (!pte_present(*pte))
1878 page = vm_normal_page(vma, addr, *pte);
1881 * Note that uprobe, debugger, or MAP_PRIVATE may change the
1882 * page table, but the new page will not be a subpage of hpage.
1884 if (hpage + i != page)
1889 /* step 2: adjust rmap */
1890 for (i = 0, addr = haddr, pte = start_pte;
1891 #ifdef CONFIG_FINEGRAINED_THP
1896 i++, addr += PAGE_SIZE, pte++) {
1901 page = vm_normal_page(vma, addr, *pte);
1902 page_remove_rmap(page, false);
1905 pte_unmap_unlock(start_pte, ptl);
1907 /* step 3: set proper refcount and mm_counters. */
1909 page_ref_sub(hpage, count);
1910 add_mm_counter(vma->vm_mm, mm_counter_file(hpage), -count);
1913 /* step 4: collapse pmd */
1914 ptl = pmd_lock(vma->vm_mm, pmd);
1915 #ifdef CONFIG_FINEGRAINED_THP
1916 if (hpage_type == THP_TYPE_64KB) {
1917 pte_t *ptep = pte_offset_map(pmd, haddr);
1918 arch_clear_huge_pte_range(vma->vm_mm, haddr, ptep);
1921 _pmd = pmdp_collapse_flush(vma, haddr, pmd);
1924 pte_free(mm, pmd_pgtable(_pmd));
1926 #else /* CONFIG_FINEGRAINED_THP*/
1927 _pmd = pmdp_collapse_flush(vma, haddr, pmd);
1930 pte_free(mm, pmd_pgtable(_pmd));
1931 #endif /* CONFIG_FINEGRAINED_THP */
1939 pte_unmap_unlock(start_pte, ptl);
1943 static int khugepaged_collapse_pte_mapped_thps(struct mm_slot *mm_slot)
1945 struct mm_struct *mm = mm_slot->mm;
1948 if (likely(mm_slot->nr_pte_mapped_thp == 0))
1951 if (!mmap_write_trylock(mm))
1954 if (unlikely(khugepaged_test_exit(mm)))
1957 for (i = 0; i < mm_slot->nr_pte_mapped_thp; i++)
1958 collapse_pte_mapped_thp(mm, mm_slot->pte_mapped_thp[i]);
1961 mm_slot->nr_pte_mapped_thp = 0;
1962 mmap_write_unlock(mm);
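/*
 * retract_page_tables() walks every vma that maps the collapsed file range
 * and, where the range is suitably aligned and the vma has no anon pages,
 * retracts the pte page table under the mmap write lock so the next fault
 * can map the new huge page directly; vmas whose mmap_lock cannot be taken
 * right away are recorded via khugepaged_add_pte_mapped_thp() and retried
 * later.
 */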
1966 #ifdef CONFIG_FINEGRAINED_THP
1967 static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff,
1970 static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff)
1973 struct vm_area_struct *vma;
1974 struct mm_struct *mm;
1977 #ifdef CONFIG_FINEGRAINED_THP
1979 int hpage_size = (hpage_type == THP_TYPE_64KB) ?
1980 HPAGE_CONT_PTE_SIZE : HPAGE_PMD_SIZE;
1981 #endif /* CONFIG_FINEGRAINED_THP */
1983 i_mmap_lock_write(mapping);
1984 vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
1986 * Check vma->anon_vma to exclude MAP_PRIVATE mappings that
1987 * got written to. These VMAs are likely not worth investing
1988 * mmap_write_lock(mm) as PMD-mapping is likely to be split
1991 * Note that the vma->anon_vma check is racy: it can be set up after
1992 * the check but before we took mmap_lock by the fault path.
1993 * But page lock would prevent establishing any new ptes of the
1994 * page, so we are safe.
1996 * An alternative would be to drop the check, but check that the page
1997 * table is clear before calling pmdp_collapse_flush() under
1998 * ptl. It has a higher chance to recover the THP for the VMA, but
1999 * has a higher cost too.
2003 addr = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
2004 #ifdef CONFIG_FINEGRAINED_THP
2005 if (hpage_type == THP_TYPE_64KB && addr & ~HPAGE_CONT_PTE_MASK)
2007 else if (hpage_type == THP_TYPE_2MB && addr & ~HPAGE_PMD_MASK)
2009 if (vma->vm_end < addr + hpage_size)
2013 pmd = mm_find_pmd(mm, addr);
2016 if (mmap_write_trylock(mm)) {
2017 spinlock_t *ptl = pmd_lock(mm, pmd);
2018 if (hpage_type == THP_TYPE_64KB) {
2020 ptep = pte_offset_map(pmd, addr);
2021 /* pte maps are established on page fault handling */
2022 arch_clear_huge_pte_range(mm, addr, ptep);
2027 * We need exclusive mmap_lock to retract page table.
2029 * We use trylock due to lock inversion: we need to acquire
2030 * mmap_lock while holding page lock. Fault path does it in
2031 * reverse order. Trylock is a way to avoid deadlock.
2033 _pmd = pmdp_collapse_flush(vma, addr, pmd);
2037 pte_free(mm, pmd_pgtable(_pmd));
2039 mmap_write_unlock(mm);
2041 khugepaged_add_pte_mapped_thp(vma->vm_mm, addr, hpage_type);
2042 #else /* CONFIG_FINEGRAINED_THP */
2043 if (addr & ~HPAGE_PMD_MASK)
2045 if (vma->vm_end < addr + HPAGE_PMD_SIZE)
2048 pmd = mm_find_pmd(mm, addr);
2052 * We need exclusive mmap_lock to retract page table.
2054 * We use trylock due to lock inversion: we need to acquire
2055 * mmap_lock while holding page lock. Fault path does it in
2056 * reverse order. Trylock is a way to avoid deadlock.
2058 if (mmap_write_trylock(mm)) {
2059 if (!khugepaged_test_exit(mm)) {
2060 spinlock_t *ptl = pmd_lock(mm, pmd);
2061 /* assume page table is clear */
2062 _pmd = pmdp_collapse_flush(vma, addr, pmd);
2065 pte_free(mm, pmd_pgtable(_pmd));
2067 mmap_write_unlock(mm);
2069 /* Try again later */
2070 khugepaged_add_pte_mapped_thp(mm, addr);
2072 #endif /* CONFIG_FINEGRAINED_THP */
2074 i_mmap_unlock_write(mapping);
2078 * collapse_file - collapse filemap/tmpfs/shmem pages into a huge one.
2080 * Basic scheme is simple, details are more complex:
2081 * - allocate and lock a new huge page;
2082 * - scan page cache replacing old pages with the new one
2083 * + swap/gup in pages if necessary;
2085 * + keep old pages around in case rollback is required;
2086 * - if replacing succeeds:
2087 * + copy data over;
2088 * + free old pages;
2089 * + unlock huge page;
2090 * - if replacing failed:
2091 * + put all pages back and unfreeze them;
2092 * + restore gaps in the page cache;
2093 * + unlock and free huge page;
2095 #ifdef CONFIG_FINEGRAINED_THP
2096 static void collapse_file(struct mm_struct *mm,
2097 struct file *file, pgoff_t start,
2098 struct page **hpage, int node, int hpage_type)
2099 #else /* CONFIG_FINEGRAINED_THP */
2100 static void collapse_file(struct mm_struct *mm,
2101 struct file *file, pgoff_t start,
2102 struct page **hpage, int node)
2103 #endif /* CONFIG_FINEGRAINED_THP */
2105 struct address_space *mapping = file->f_mapping;
2107 struct page *new_page;
2108 #ifdef CONFIG_FINEGRAINED_THP
2109 int hpage_nr = (hpage_type == THP_TYPE_64KB ?
2110 HPAGE_CONT_PTE_NR : HPAGE_PMD_NR);
2111 int hpage_order = (hpage_type == THP_TYPE_64KB ?
2112 HPAGE_CONT_PTE_ORDER : HPAGE_PMD_ORDER);
2113 pgoff_t index, end = start + hpage_nr;
2114 #else /* CONFIG_FINEGRAINED_THP */
2115 pgoff_t index, end = start + HPAGE_PMD_NR;
2116 #endif /* CONFIG_FINEGRAINED_THP */
2117 LIST_HEAD(pagelist);
2118 #ifdef CONFIG_FINEGRAINED_THP
2119 XA_STATE_ORDER(xas, &mapping->i_pages, start, hpage_order);
2121 XA_STATE_ORDER(xas, &mapping->i_pages, start, HPAGE_PMD_ORDER);
2123 int nr_none = 0, result = SCAN_SUCCEED;
2124 bool is_shmem = shmem_file(file);
2126 VM_BUG_ON(!IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS) && !is_shmem);
2127 #ifdef CONFIG_FINEGRAINED_THP
2128 VM_BUG_ON(start & (hpage_nr - 1));
2130 VM_BUG_ON(start & (HPAGE_PMD_NR - 1));
2133 /* Only allocate from the target node */
2134 gfp = alloc_hugepage_khugepaged_gfpmask() | __GFP_THISNODE;
2136 #ifdef CONFIG_FINEGRAINED_THP
2137 new_page = khugepaged_alloc_page(hpage, gfp, node, hpage_type);
2139 new_page = khugepaged_alloc_page(hpage, gfp, node);
2142 result = SCAN_ALLOC_HUGE_PAGE_FAIL;
2146 if (unlikely(mem_cgroup_charge(new_page, mm, gfp))) {
2147 result = SCAN_CGROUP_CHARGE_FAIL;
2150 count_memcg_page_event(new_page, THP_COLLAPSE_ALLOC);
2152 /* This will be less messy when we use multi-index entries */
2155 xas_create_range(&xas);
2156 if (!xas_error(&xas))
2158 xas_unlock_irq(&xas);
2159 if (!xas_nomem(&xas, GFP_KERNEL)) {
2165 __SetPageLocked(new_page);
2167 __SetPageSwapBacked(new_page);
2168 new_page->index = start;
2169 new_page->mapping = mapping;
2172 * At this point the new_page is locked and not up-to-date.
2173 * It's safe to insert it into the page cache, because nobody would
2174 * be able to map it or use it in another way until we unlock it.
2177 xas_set(&xas, start);
2178 for (index = start; index < end; index++) {
2179 struct page *page = xas_next(&xas);
2181 VM_BUG_ON(index != xas.xa_index);
2185 * Stop if extent has been truncated or
2186 * hole-punched, and is now completely empty.
2189 if (index == start) {
2190 if (!xas_next_entry(&xas, end - 1)) {
2191 result = SCAN_TRUNCATED;
2194 xas_set(&xas, index);
2196 if (!shmem_charge(mapping->host, 1)) {
2200 xas_store(&xas, new_page);
2205 if (xa_is_value(page) || !PageUptodate(page)) {
2206 xas_unlock_irq(&xas);
2207 /* swap in or instantiate fallocated page */
2208 if (shmem_getpage(mapping->host, index, &page,
2213 } else if (trylock_page(page)) {
2215 xas_unlock_irq(&xas);
2217 result = SCAN_PAGE_LOCK;
2220 } else { /* !is_shmem */
2221 if (!page || xa_is_value(page)) {
2222 xas_unlock_irq(&xas);
2223 page_cache_sync_readahead(mapping, &file->f_ra,
2226 /* drain pagevecs to help isolate_lru_page() */
2228 page = find_lock_page(mapping, index);
2229 if (unlikely(page == NULL)) {
2233 } else if (PageDirty(page)) {
2235 * khugepaged only works on read-only fd,
2236 * so this page is dirty because it hasn't
2237 * been flushed since first write. There
2238 * won't be new dirty pages.
2240 * Trigger async flush here and hope the
2241 * writeback is done when khugepaged
2242 * revisits this page.
2244 * This is a one-off situation. We are not
2245 * forcing writeback in a loop.
2247 xas_unlock_irq(&xas);
2248 filemap_flush(mapping);
2251 } else if (trylock_page(page)) {
2253 xas_unlock_irq(&xas);
2255 result = SCAN_PAGE_LOCK;
2261 * The page must be locked, so we can drop the i_pages lock
2262 * without racing with truncate.
2264 VM_BUG_ON_PAGE(!PageLocked(page), page);
2266 /* make sure the page is up to date */
2267 if (unlikely(!PageUptodate(page))) {
2273 * If file was truncated then extended, or hole-punched, before
2274 * we locked the first page, then a THP might be there already.
2276 if (PageTransCompound(page)) {
2277 result = SCAN_PAGE_COMPOUND;
2281 if (page_mapping(page) != mapping) {
2282 result = SCAN_TRUNCATED;
2286 if (!is_shmem && PageDirty(page)) {
2288 * khugepaged only works on read-only fd, so this
2289 * page is dirty because it hasn't been flushed
2290 * since first write.
2296 if (isolate_lru_page(page)) {
2297 result = SCAN_DEL_PAGE_LRU;
2301 if (page_has_private(page) &&
2302 !try_to_release_page(page, GFP_KERNEL)) {
2303 result = SCAN_PAGE_HAS_PRIVATE;
2304 putback_lru_page(page);
2308 if (page_mapped(page))
2309 unmap_mapping_pages(mapping, index, 1, false);
2312 xas_set(&xas, index);
2314 VM_BUG_ON_PAGE(page != xas_load(&xas), page);
2315 VM_BUG_ON_PAGE(page_mapped(page), page);
2318 * The page is expected to have page_count() == 3:
2319 * - we hold a pin on it;
2320 * - one reference from page cache;
2321 * - one from isolate_lru_page;
2323 if (!page_ref_freeze(page, 3)) {
2324 result = SCAN_PAGE_COUNT;
2325 xas_unlock_irq(&xas);
2326 putback_lru_page(page);
2331 * Add the page to the list to be able to undo the collapse if
2332 * something goes wrong.
2334 list_add_tail(&page->lru, &pagelist);
2336 /* Finally, replace with the new page. */
2337 xas_store(&xas, new_page);
2346 #ifdef CONFIG_FINEGRAINED_THP
2347 if (hpage_type == THP_TYPE_64KB)
2348 __inc_node_page_state(new_page, NR_SHMEM_64KB_THPS);
2350 __inc_node_page_state(new_page, NR_SHMEM_THPS);
2351 #else /* CONFIG_FINEGRAINED_THP */
2352 __inc_node_page_state(new_page, NR_SHMEM_THPS);
2353 #endif /* CONFIG_FINEGRAINED_THP */
2355 #ifdef CONFIG_FINEGRAINED_THP
2356 if (hpage_type == THP_TYPE_64KB)
2357 __inc_node_page_state(new_page, NR_FILE_64KB_THPS);
2359 __inc_node_page_state(new_page, NR_FILE_THPS);
2360 #else /* CONFIG_FINEGRAINED_THP */
2361 __inc_node_page_state(new_page, NR_FILE_THPS);
2362 #endif /* CONFIG_FINEGRAINED_THP */
2363 filemap_nr_thps_inc(mapping);
2367 __mod_lruvec_page_state(new_page, NR_FILE_PAGES, nr_none);
2369 __mod_lruvec_page_state(new_page, NR_SHMEM, nr_none);
2373 xas_unlock_irq(&xas);
2376 if (result == SCAN_SUCCEED) {
2377 struct page *page, *tmp;
2378 #ifdef CONFIG_FINEGRAINED_THP
2383 * Replacing the old pages with the new one has succeeded; now we
2384 * need to copy the content and free the old pages.
2387 list_for_each_entry_safe(page, tmp, &pagelist, lru) {
2388 #ifdef CONFIG_FINEGRAINED_THP
2389 if (hpage_type != THP_TYPE_64KB) {
2390 while (index < page->index) {
2391 clear_highpage(new_page + (index % HPAGE_PMD_NR));
2396 if (hpage_type == THP_TYPE_64KB) {
2397 copy_highpage(new_page + offset, page);
2400 copy_highpage(new_page + (page->index % HPAGE_PMD_NR),
2402 #else /* CONFIG_FINEGRAINED_THP */
2403 while (index < page->index) {
2404 clear_highpage(new_page + (index % HPAGE_PMD_NR));
2407 copy_highpage(new_page + (page->index % HPAGE_PMD_NR),
2409 #endif /* CONFIG_FINEGRAINED_THP */
2410 list_del(&page->lru);
2411 page->mapping = NULL;
2412 page_ref_unfreeze(page, 1);
2413 ClearPageActive(page);
2414 ClearPageUnevictable(page);
2419 #ifdef CONFIG_FINEGRAINED_THP
2420 if (hpage_type == THP_TYPE_64KB) {
2421 while (index < end) {
2422 clear_highpage(new_page + offset);
2427 while (index < end) {
2428 clear_highpage(new_page + (index % HPAGE_PMD_NR));
2432 #else /* CONFIG_FINEGRAINED_THP */
2433 while (index < end) {
2434 clear_highpage(new_page + (index % HPAGE_PMD_NR));
2437 #endif /* CONFIG_FINEGRAINED_THP */
2439 SetPageUptodate(new_page);
2440 #ifdef CONFIG_FINEGRAINED_THP
2441 page_ref_add(new_page, hpage_nr - 1);
2443 page_ref_add(new_page, HPAGE_PMD_NR - 1);
2446 set_page_dirty(new_page);
2447 lru_cache_add(new_page);
2450 * Remove pte page tables, so we can re-fault the page as huge.
2452 #ifdef CONFIG_FINEGRAINED_THP
2453 retract_page_tables(mapping, start, hpage_type);
2454 if (hpage_type == THP_TYPE_2MB)
2456 #else /* CONFIG_FINEGRAINED_THP */
2457 retract_page_tables(mapping, start);
2459 #endif /* CONFIG_FINEGRAINED_THP */
2460 khugepaged_pages_collapsed++;
2464 /* Something went wrong: roll back page cache changes */
2466 mapping->nrpages -= nr_none;
2469 shmem_uncharge(mapping->host, nr_none);
2471 xas_set(&xas, start);
2472 xas_for_each(&xas, page, end - 1) {
2473 page = list_first_entry_or_null(&pagelist,
2475 if (!page || xas.xa_index < page->index) {
2479 /* Put holes back where they were */
2480 xas_store(&xas, NULL);
2484 VM_BUG_ON_PAGE(page->index != xas.xa_index, page);
2486 /* Unfreeze the page. */
2487 list_del(&page->lru);
2488 page_ref_unfreeze(page, 2);
2489 xas_store(&xas, page);
2491 xas_unlock_irq(&xas);
2493 putback_lru_page(page);
2497 xas_unlock_irq(&xas);
2499 new_page->mapping = NULL;
2502 unlock_page(new_page);
2504 #ifdef CONFIG_FINEGRAINED_THP
2505 if (result != SCAN_SUCCEED && new_page && hpage_type == THP_TYPE_64KB)
2508 VM_BUG_ON(!list_empty(&pagelist));
2509 if (!IS_ERR_OR_NULL(*hpage))
2510 mem_cgroup_uncharge(*hpage);
2511 /* TODO: tracepoints */
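/*
 * Illustrative note (an added sketch, not part of the original source): the
 * success path above is a refcount hand-off.  Each small page entered the
 * loop with page_count() == 3 (our pin, the page-cache reference and the
 * LRU isolation), was frozen to 0 by page_ref_freeze(page, 3) while hidden
 * behind the new entry, and is unfrozen to a single reference only after
 * its contents have been copied, so the final put_page() in the elided
 * cleanup lines frees it.  The rollback path instead unfreezes each page to
 * 2 references (pin + page cache) and puts it back on the LRU.
 */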
2514 #ifdef CONFIG_FINEGRAINED_THP
2515 static void khugepaged_scan_file(struct mm_struct *mm,
2516 struct file *file, pgoff_t start, struct page **hpage,
2518 #else /* CONFIG_FINEGRAINED_THP */
2519 static void khugepaged_scan_file(struct mm_struct *mm,
2520 struct file *file, pgoff_t start, struct page **hpage)
2521 #endif /* CONFIG_FINEGRAINED_THP */
2523 struct page *page = NULL;
2524 struct address_space *mapping = file->f_mapping;
2525 XA_STATE(xas, &mapping->i_pages, start);
2527 int node = NUMA_NO_NODE;
2528 int result = SCAN_SUCCEED;
2529 #ifdef CONFIG_FINEGRAINED_THP
2531 int max_ptes_swap, max_ptes_none, max_ptes_shared;
2533 if (hpage_type == THP_TYPE_64KB) {
2534 hpage_nr = HPAGE_CONT_PTE_NR; /* 64KB */
2535 max_ptes_swap = khugepaged_max_ptes_swap_64kb;
2536 max_ptes_none = khugepaged_max_ptes_none_64kb;
2537 max_ptes_shared = khugepaged_max_ptes_shared_64kb;
2539 hpage_nr = HPAGE_PMD_NR; /* 2MB */
2540 max_ptes_swap = khugepaged_max_ptes_swap;
2541 max_ptes_none = khugepaged_max_ptes_none;
2542 max_ptes_shared = khugepaged_max_ptes_shared;
2544 #endif /* CONFIG_FINEGRAINED_THP */
2548 memset(khugepaged_node_load, 0, sizeof(khugepaged_node_load));
2550 #ifdef CONFIG_FINEGRAINED_THP
2551 xas_for_each(&xas, page, start + hpage_nr - 1)
2553 xas_for_each(&xas, page, start + HPAGE_PMD_NR - 1)
2556 if (xas_retry(&xas, page))
2559 if (xa_is_value(page)) {
2560 #ifdef CONFIG_FINEGRAINED_THP
2561 if (++swap > max_ptes_swap)
2563 if (++swap > khugepaged_max_ptes_swap)
2566 result = SCAN_EXCEED_SWAP_PTE;
2572 if (PageTransCompound(page)) {
2573 result = SCAN_PAGE_COMPOUND;
2577 node = page_to_nid(page);
2578 if (khugepaged_scan_abort(node)) {
2579 result = SCAN_SCAN_ABORT;
2582 khugepaged_node_load[node]++;
2584 if (!PageLRU(page)) {
2585 result = SCAN_PAGE_LRU;
2589 if (page_count(page) !=
2590 1 + page_mapcount(page) + page_has_private(page)) {
2591 result = SCAN_PAGE_COUNT;
2596 * We probably should check if the page is referenced here, but
2597 * nobody would transfer pte_young() to PageReferenced() for us.
2598 * And rmap walk here is just too costly...
2603 if (need_resched()) {
2610 if (result == SCAN_SUCCEED) {
2611 #ifdef CONFIG_FINEGRAINED_THP
2612 if (present < hpage_nr - max_ptes_none)
2614 if (present < HPAGE_PMD_NR - khugepaged_max_ptes_none)
2617 result = SCAN_EXCEED_NONE_PTE;
2619 node = khugepaged_find_target_node();
2620 #ifdef CONFIG_FINEGRAINED_THP
2621 collapse_file(mm, file, start, hpage, node, hpage_type);
2623 collapse_file(mm, file, start, hpage, node);
2628 /* TODO: tracepoints */
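/*
 * Worked example for the scan decision above (illustrative values, an
 * assumption rather than text from this file): with 4KB base pages, a 2MB
 * collapse has hpage_nr == HPAGE_PMD_NR == 512, so if khugepaged_max_ptes_none
 * is at the usual default of 511 the collapse is attempted whenever at least
 * one page in the 512-slot window is already present
 * (present >= 512 - 511 == 1).  A 64KB collapse has
 * hpage_nr == HPAGE_CONT_PTE_NR == 16; a stricter max_ptes_none_64kb of,
 * say, 8 would require at least 8 of the 16 slots to be populated before
 * collapse_file() is called.
 */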
2631 #ifdef CONFIG_FINEGRAINED_THP
2632 static void khugepaged_scan_file(struct mm_struct *mm,
2633 struct file *file, pgoff_t start, struct page **hpage,
2635 #else /* CONFIG_FINEGRAINED_THP */
2636 static void khugepaged_scan_file(struct mm_struct *mm,
2637 struct file *file, pgoff_t start, struct page **hpage)
2638 #endif /* CONFIG_FINEGRAINED_THP */
2643 static int khugepaged_collapse_pte_mapped_thps(struct mm_slot *mm_slot)
2649 #ifdef CONFIG_FINEGRAINED_THP
2651 * If the return value is greater than THP_TYPE_FAIL, the vma can host a
2652 * hugepage and the calculated hugepage start and end are stored through
2653 * the hstart/hend pointers; otherwise the vma cannot host a hugepage.
2655 static inline int hugepage_determine_htype(unsigned long vm_start,
2656 unsigned long vm_end, unsigned long *hstart, unsigned long *hend) {
2657 unsigned long start, end;
2659 /* determine 2MB hugepage */
2660 start = (vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
2661 end = vm_end & HPAGE_PMD_MASK;
2663 /* determine 64KB hugepage */
2664 start = (vm_start + ~HPAGE_CONT_PTE_MASK) & HPAGE_CONT_PTE_MASK;
2665 end = vm_end & HPAGE_CONT_PTE_MASK;
2667 return THP_TYPE_FAIL;
2670 return THP_TYPE_64KB;
2674 return THP_TYPE_2MB;
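/*
 * Worked example (illustrative addresses, not taken from the source): for a
 * vma spanning [0x1230000, 0x1500000), the 2MB candidate is
 * start = round_up(0x1230000, 2MB) = 0x1400000 and
 * end = round_down(0x1500000, 2MB) = 0x1400000, i.e. no full 2MB window
 * fits.  The 64KB candidate is start = 0x1230000 (already 64KB aligned) and
 * end = 0x1500000, a non-empty range, so the function would report
 * THP_TYPE_64KB with those bounds stored in *hstart/*hend.
 */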
2678 KHUGEPAGE_SCAN_CONTINUE,
2679 KHUGEPAGE_SCAN_BREAK,
2680 KHUGEPAGE_SCAN_BREAK_MMAP_LOCK,
2683 static unsigned int khugepaged_scan_vma(struct mm_struct *mm,
2684 struct vm_area_struct *vma, struct page **hpage,
2685 unsigned int pages, int *progress)
2687 unsigned long hstart, hend;
2688 int hpage_type, ret;
2689 int hpage_size, hpage_nr;
2691 if (!hugepage_vma_check(vma, vma->vm_flags))
2692 return KHUGEPAGE_SCAN_CONTINUE;
2694 hpage_type = hugepage_determine_htype(
2695 (vma->vm_start > khugepaged_scan.address) ?
2696 vma->vm_start : khugepaged_scan.address,
2697 vma->vm_end, &hstart, &hend);
2699 if (hpage_type == THP_TYPE_FAIL)
2700 return KHUGEPAGE_SCAN_CONTINUE;
2701 if (khugepaged_scan.address > hend)
2702 return KHUGEPAGE_SCAN_CONTINUE;
2703 if (khugepaged_scan.address < hstart)
2704 khugepaged_scan.address = hstart;
2706 if (hpage_type == THP_TYPE_64KB) {
2707 VM_BUG_ON(khugepaged_scan.address & ~HPAGE_CONT_PTE_MASK);
2708 hpage_size = HPAGE_CONT_PTE_SIZE; /* 64KB */
2709 hpage_nr = HPAGE_CONT_PTE_NR;
2710 } else if (hpage_type == THP_TYPE_2MB) {
2711 VM_BUG_ON(khugepaged_scan.address & ~HPAGE_PMD_MASK);
2712 hpage_size = HPAGE_PMD_SIZE; /* 2MB */
2713 hpage_nr = HPAGE_PMD_NR;
2714 if (IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS) && vma->vm_file &&
2715 !IS_ALIGNED((vma->vm_start >> PAGE_SHIFT) - vma->vm_pgoff,
2717 /* fallback, vma or file not aligned to 2MB */
2718 hpage_size = HPAGE_CONT_PTE_SIZE; /* 64KB */
2719 hpage_nr = HPAGE_CONT_PTE_NR;
2720 hpage_type = THP_TYPE_64KB;
2725 while (khugepaged_scan.address < hend) {
2726 if (khugepaged_scan.address + hpage_size >= hend) {
2727 if (khugepaged_scan.address + HPAGE_CONT_PTE_SIZE < hend) {
2728 hpage_size = HPAGE_CONT_PTE_SIZE;
2729 hpage_nr = HPAGE_CONT_PTE_NR;
2730 hpage_type = THP_TYPE_64KB;
2735 if (unlikely(khugepaged_test_exit(mm)))
2736 return KHUGEPAGE_SCAN_BREAK;
2738 VM_BUG_ON(khugepaged_scan.address < hstart ||
2739 khugepaged_scan.address + hpage_size >
2741 if (IS_ENABLED(CONFIG_SHMEM) && vma->vm_file) {
2742 struct file *file = get_file(vma->vm_file);
2743 pgoff_t pgoff = linear_page_index(vma,
2744 khugepaged_scan.address);
2746 mmap_read_unlock(mm);
2748 khugepaged_scan_file(mm, file, pgoff, hpage, hpage_type);
2751 ret = khugepaged_scan_pmd(mm, vma,
2752 khugepaged_scan.address,
2755 /* move to next address */
2756 khugepaged_scan.address += hpage_size;
2757 *progress += hpage_nr;
2759 /* we released mmap_lock so break loop */
2760 return KHUGEPAGE_SCAN_BREAK_MMAP_LOCK;
2761 if (*progress >= pages)
2762 return KHUGEPAGE_SCAN_BREAK;
2764 return KHUGEPAGE_SCAN_CONTINUE;
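/*
 * Worked example for the tail handling above (illustrative addresses, an
 * added sketch): with hstart = 0x1400000 and hend = 0x1650000, the first
 * iteration scans the full 2MB window at 0x1400000.  At the next address,
 * 0x1600000, another 2MB step would overrun hend
 * (0x1600000 + 2MB >= 0x1650000) while 0x1600000 + 64KB is still below it,
 * so the scanner downgrades to THP_TYPE_64KB and walks the remaining
 * [0x1600000, 0x1650000) range in 64KB windows.
 */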
2767 static struct thp_scan_hint *find_scan_hint(struct mm_slot *slot,
2770 struct thp_scan_hint *hint;
2772 list_for_each_entry(hint, &khugepaged_scan.hint_list, hint_list) {
2773 if (hint->slot == slot)
2779 #ifdef CONFIG_THP_CONSERVATIVE
2780 /* caller must hold a proper mmap_lock */
2781 void khugepaged_mem_hook(struct mm_struct *mm, unsigned long addr,
2782 long diff, const char *debug)
2784 struct mm_slot *slot;
2785 struct vm_area_struct *vma;
2786 struct thp_scan_hint *hint;
2787 bool wakeup = false;
2790 vma = find_vma(mm, addr);
2791 if (!hugepage_vma_check(vma, vma->vm_flags))
2795 spin_lock(&khugepaged_mm_lock);
2796 slot = get_mm_slot(mm);
2798 /* create a new slot or bail out */
2799 spin_unlock(&khugepaged_mm_lock);
2802 if (__khugepaged_enter(mm))
2808 hint = find_scan_hint(slot, addr);
2810 spin_unlock(&khugepaged_mm_lock);
2811 hint = kzalloc(sizeof(struct thp_scan_hint), GFP_KERNEL);
2815 hint->jiffies = jiffies;
2816 spin_lock(&khugepaged_mm_lock);
2817 list_add(&hint->hint_list, &khugepaged_scan.hint_list);
2818 khugepaged_scan.nr_hint++;
2821 if (hint->diff >= HPAGE_CONT_PTE_SIZE) {
2823 //list_move(&hint->hint_list, &khugepaged_scan.hint_list);
2825 spin_unlock(&khugepaged_mm_lock);
2827 /* if possible, wake khugepaged up to start a scan */
2829 wake_up_interruptible(&khugepaged_wait);
2832 #else /* CONFIG_THP_CONSERVATIVE */
2833 void khugepaged_mem_hook(struct mm_struct *mm,
2834 unsigned long addr, long diff, const char *debug)
2836 #endif /* CONFIG_THP_CONSERVATIVE */
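/*
 * Illustrative (hypothetical) call site, not taken from this file: a fault
 * or mmap path that has just populated an anonymous region could report the
 * growth so that khugepaged rescans this mm sooner, e.g.
 *
 *	khugepaged_mem_hook(vma->vm_mm, address, HPAGE_CONT_PTE_SIZE, __func__);
 *
 * Under CONFIG_THP_CONSERVATIVE the hook records or updates a thp_scan_hint
 * and wakes khugepaged once the accumulated diff reaches
 * HPAGE_CONT_PTE_SIZE; otherwise the stub above makes it a no-op.
 */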
2838 static void clear_hint_list(struct mm_slot *slot)
2840 struct thp_scan_hint *hint;
2841 hint = find_scan_hint(slot, 0);
2843 list_del(&hint->hint_list);
2845 khugepaged_scan.nr_hint--;
2849 static struct thp_scan_hint *get_next_hint(void)
2851 if (!list_empty(&khugepaged_scan.hint_list)) {
2852 struct thp_scan_hint *hint = list_first_entry(
2853 &khugepaged_scan.hint_list,
2854 struct thp_scan_hint, hint_list);
2855 list_del(&hint->hint_list);
2856 khugepaged_scan.nr_hint--;
2861 #endif /* CONFIG_FINEGRAINED_THP */
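/*
 * Added note (a summary sketch, not original text): hints recorded by
 * khugepaged_mem_hook() are taken from the head of the list, so the most
 * recently added hint is picked up first (list_add() prepends and
 * get_next_hint() takes the first entry).  When no mm_slot is currently
 * being scanned, khugepaged_scan_mm_slot() below prefers a hinted mm over
 * the next entry in khugepaged_scan.mm_head, and clear_hint_list() drops a
 * slot's pending hint once that slot is selected or released.
 */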
2863 static unsigned int khugepaged_scan_mm_slot(unsigned int pages,
2864 struct page **hpage)
2865 __releases(&khugepaged_mm_lock)
2866 __acquires(&khugepaged_mm_lock)
2868 struct mm_slot *mm_slot;
2869 struct mm_struct *mm;
2870 struct vm_area_struct *vma;
2874 lockdep_assert_held(&khugepaged_mm_lock);
2876 #ifdef CONFIG_FINEGRAINED_THP
2877 if (khugepaged_scan.mm_slot)
2878 mm_slot = khugepaged_scan.mm_slot;
2879 else if (!list_empty(&khugepaged_scan.hint_list)) {
2880 struct thp_scan_hint *hint;
2882 unsigned long jiffies_diff;
2885 hint = get_next_hint();
2889 mm_slot = hint->slot;
2890 mem_diff = hint->diff;
2891 jiffies_diff = jiffies - hint->jiffies;
2893 clear_hint_list(mm_slot);
2895 if (khugepaged_test_exit(mm_slot->mm))
2897 khugepaged_scan.address = 0;
2898 khugepaged_scan.mm_slot = mm_slot;
2901 mm_slot = list_entry(khugepaged_scan.mm_head.next,
2902 struct mm_slot, mm_node);
2903 clear_hint_list(mm_slot);
2904 khugepaged_scan.address = 0;
2905 khugepaged_scan.mm_slot = mm_slot;
2907 #else /* CONFIG_FINEGRAINED_THP */
2908 if (khugepaged_scan.mm_slot)
2909 mm_slot = khugepaged_scan.mm_slot;
2911 mm_slot = list_entry(khugepaged_scan.mm_head.next,
2912 struct mm_slot, mm_node);
2913 khugepaged_scan.address = 0;
2914 khugepaged_scan.mm_slot = mm_slot;
2916 #endif /* CONFIG_FINEGRAINED_THP */
2917 spin_unlock(&khugepaged_mm_lock);
2918 khugepaged_collapse_pte_mapped_thps(mm_slot);
2922 * Don't wait for semaphore (to avoid long wait times). Just move to
2923 * the next mm on the list.
2926 if (unlikely(!mmap_read_trylock(mm)))
2927 goto breakouterloop_mmap_lock;
2928 if (likely(!khugepaged_test_exit(mm)))
2929 vma = find_vma(mm, khugepaged_scan.address);
2932 for (; vma; vma = vma->vm_next) {
2933 #ifdef CONFIG_FINEGRAINED_THP
2936 unsigned long hstart, hend;
2940 if (unlikely(khugepaged_test_exit(mm))) {
2944 #ifdef CONFIG_FINEGRAINED_THP
2945 ret = khugepaged_scan_vma(mm, vma, hpage, pages, &progress);
2947 if (ret == KHUGEPAGE_SCAN_CONTINUE) {
2950 } else if (ret == KHUGEPAGE_SCAN_BREAK)
2951 goto breakouterloop;
2952 else if (ret == KHUGEPAGE_SCAN_BREAK_MMAP_LOCK)
2953 goto breakouterloop_mmap_lock;
2954 #else /* CONFIG_FINEGRAINED_THP */
2955 if (!hugepage_vma_check(vma, vma->vm_flags)) {
2960 hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
2961 hend = vma->vm_end & HPAGE_PMD_MASK;
2964 if (khugepaged_scan.address > hend)
2966 if (khugepaged_scan.address < hstart)
2967 khugepaged_scan.address = hstart;
2968 VM_BUG_ON(khugepaged_scan.address & ~HPAGE_PMD_MASK);
2969 if (shmem_file(vma->vm_file) && !shmem_huge_enabled(vma))
2972 while (khugepaged_scan.address < hend) {
2975 if (unlikely(khugepaged_test_exit(mm)))
2976 goto breakouterloop;
2978 VM_BUG_ON(khugepaged_scan.address < hstart ||
2979 khugepaged_scan.address + HPAGE_PMD_SIZE >
2981 if (IS_ENABLED(CONFIG_SHMEM) && vma->vm_file) {
2982 struct file *file = get_file(vma->vm_file);
2983 pgoff_t pgoff = linear_page_index(vma,
2984 khugepaged_scan.address);
2986 mmap_read_unlock(mm);
2988 khugepaged_scan_file(mm, file, pgoff, hpage);
2991 ret = khugepaged_scan_pmd(mm, vma,
2992 khugepaged_scan.address,
2995 /* move to next address */
2996 khugepaged_scan.address += HPAGE_PMD_SIZE;
2997 progress += HPAGE_PMD_NR;
2999 /* we released mmap_lock so break loop */
3000 goto breakouterloop_mmap_lock;
3001 if (progress >= pages)
3002 goto breakouterloop;
3004 #endif /* CONFIG_FINEGRAINED_THP */
3007 mmap_read_unlock(mm); /* exit_mmap will destroy ptes after this */
3008 breakouterloop_mmap_lock:
3010 spin_lock(&khugepaged_mm_lock);
3011 VM_BUG_ON(khugepaged_scan.mm_slot != mm_slot);
3013 * Release the current mm_slot if this mm is about to die, or
3014 * if we scanned all vmas of this mm.
3016 if (khugepaged_test_exit(mm) || !vma) {
3018 * Make sure that if mm_users is reaching zero while
3019 * khugepaged runs here, khugepaged_exit will find
3020 * mm_slot not pointing to the exiting mm.
3022 #ifdef CONFIG_FINEGRAINED_THP
3023 if (!list_empty(&khugepaged_scan.hint_list)) {
3024 unsigned long jiffies_diff;
3026 struct thp_scan_hint *hint;
3027 struct mm_slot *next_slot;
3030 hint = get_next_hint();
3034 if (mm_slot->mm_node.next != &khugepaged_scan.mm_head)
3035 goto get_next_slot2;
3040 mem_diff = hint->diff;
3041 jiffies_diff = jiffies - hint->jiffies;
3042 next_slot = hint->slot;
3045 if (next_slot == mm_slot)
3046 goto get_next_hint2;
3048 if (!khugepaged_test_exit(next_slot->mm)) {
3049 list_move(&next_slot->mm_node, &mm_slot->mm_node);
3050 clear_hint_list(next_slot);
3052 goto get_next_hint2;
3054 khugepaged_scan.mm_slot = next_slot;
3055 khugepaged_scan.address = 0;
3056 } else if (mm_slot->mm_node.next != &khugepaged_scan.mm_head) {
3058 khugepaged_scan.mm_slot = list_entry(
3059 mm_slot->mm_node.next,
3060 struct mm_slot, mm_node);
3061 clear_hint_list(khugepaged_scan.mm_slot);
3062 khugepaged_scan.address = 0;
3065 khugepaged_scan.mm_slot = NULL;
3066 khugepaged_full_scans++;
3068 #else /* CONFIG_FINEGRAINED_THP */
3069 if (mm_slot->mm_node.next != &khugepaged_scan.mm_head) {
3070 khugepaged_scan.mm_slot = list_entry(
3071 mm_slot->mm_node.next,
3072 struct mm_slot, mm_node);
3073 khugepaged_scan.address = 0;
3075 khugepaged_scan.mm_slot = NULL;
3076 khugepaged_full_scans++;
3078 #endif /* CONFIG_FINEGRAINED_THP */
3079 collect_mm_slot(mm_slot);
3085 static int khugepaged_has_work(void)
3087 return !list_empty(&khugepaged_scan.mm_head) &&
3088 khugepaged_enabled();
3091 static int khugepaged_wait_event(void)
3093 return !list_empty(&khugepaged_scan.mm_head) ||
3094 kthread_should_stop();
3097 static void khugepaged_do_scan(void)
3099 struct page *hpage = NULL;
3100 unsigned int progress = 0, pass_through_head = 0;
3101 unsigned int pages = khugepaged_pages_to_scan;
3104 barrier(); /* write khugepaged_pages_to_scan to local stack */
3106 lru_add_drain_all();
3108 while (progress < pages) {
3109 if (!khugepaged_prealloc_page(&hpage, &wait))
3114 if (unlikely(kthread_should_stop() || try_to_freeze()))
3117 spin_lock(&khugepaged_mm_lock);
3118 if (!khugepaged_scan.mm_slot)
3119 pass_through_head++;
3120 if (khugepaged_has_work() &&
3121 pass_through_head < 2)
3122 progress += khugepaged_scan_mm_slot(pages - progress,
3126 spin_unlock(&khugepaged_mm_lock);
3129 if (!IS_ERR_OR_NULL(hpage))
3133 static bool khugepaged_should_wakeup(void)
3135 return kthread_should_stop() ||
3136 time_after_eq(jiffies, khugepaged_sleep_expire);
3139 static void khugepaged_wait_work(void)
3141 if (khugepaged_has_work()) {
3142 const unsigned long scan_sleep_jiffies =
3143 msecs_to_jiffies(khugepaged_scan_sleep_millisecs);
3145 if (!scan_sleep_jiffies)
3148 khugepaged_sleep_expire = jiffies + scan_sleep_jiffies;
3149 wait_event_freezable_timeout(khugepaged_wait,
3150 khugepaged_should_wakeup(),
3151 scan_sleep_jiffies);
3155 if (khugepaged_enabled())
3156 wait_event_freezable(khugepaged_wait, khugepaged_wait_event());
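/*
 * Worked example (illustrative numbers, an added note): with a scan sleep
 * of 10000 ms and HZ == 250, scan_sleep_jiffies == 2500, so
 * khugepaged_sleep_expire is set ~10 s into the future and the freezable
 * wait above returns once that timeout elapses (or earlier if the kthread
 * is being stopped and woken).
 */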
3159 #include <linux/delay.h>
3160 bool eager_allocation = false;
3162 static int khugepaged(void *none)
3164 struct mm_slot *mm_slot;
3167 set_user_nice(current, MAX_NICE);
3169 while (!kthread_should_stop()) {
3170 khugepaged_do_scan();
3171 khugepaged_wait_work();
3174 spin_lock(&khugepaged_mm_lock);
3175 mm_slot = khugepaged_scan.mm_slot;
3176 khugepaged_scan.mm_slot = NULL;
3178 collect_mm_slot(mm_slot);
3179 spin_unlock(&khugepaged_mm_lock);
3183 static void set_recommended_min_free_kbytes(void)
3187 unsigned long recommended_min;
3189 for_each_populated_zone(zone) {
3191 * We don't need to worry about fragmentation of
3192 * ZONE_MOVABLE since it only has movable pages.
3194 if (zone_idx(zone) > gfp_zone(GFP_USER))
3200 /* Ensure 2 pageblocks are free to assist fragmentation avoidance */
3201 recommended_min = pageblock_nr_pages * nr_zones * 2;
3204 * Make sure that on average at least two pageblocks are almost free
3205 * of another type, one for a migratetype to fall back to and a
3206 * second to avoid subsequent fallbacks of other types. There are 3
3207 * MIGRATE_TYPES we care about.
3209 recommended_min += pageblock_nr_pages * nr_zones *
3210 MIGRATE_PCPTYPES * MIGRATE_PCPTYPES;
3212 /* never allow reserving more than 5% of the lowmem */
3213 recommended_min = min(recommended_min,
3214 (unsigned long) nr_free_buffer_pages() / 20);
3215 recommended_min <<= (PAGE_SHIFT-10);
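/*
 * Worked example (illustrative numbers, an added note): with 4KB pages and
 * 2MB pageblocks (pageblock_nr_pages == 512), two populated zones below
 * ZONE_MOVABLE and MIGRATE_PCPTYPES == 3, the two terms above give
 * 512 * 2 * 2 + 512 * 2 * 3 * 3 == 2048 + 9216 == 11264 pages.  Unless 5%
 * of lowmem is smaller, the shift by (PAGE_SHIFT - 10) == 2 converts that
 * to 45056 kB (~44 MB) as the recommended min_free_kbytes.
 */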
3217 if (recommended_min > min_free_kbytes) {
3218 if (user_min_free_kbytes >= 0)
3219 pr_info("raising min_free_kbytes from %d to %lu to help transparent hugepage allocations\n",
3220 min_free_kbytes, recommended_min);
3222 min_free_kbytes = recommended_min;
3224 setup_per_zone_wmarks();
3227 int start_stop_khugepaged(void)
3231 mutex_lock(&khugepaged_mutex);
3232 if (khugepaged_enabled()) {
3233 if (!khugepaged_thread)
3234 khugepaged_thread = kthread_run(khugepaged, NULL,
3236 if (IS_ERR(khugepaged_thread)) {
3237 pr_err("khugepaged: kthread_run(khugepaged) failed\n");
3238 err = PTR_ERR(khugepaged_thread);
3239 khugepaged_thread = NULL;
3243 if (!list_empty(&khugepaged_scan.mm_head))
3244 wake_up_interruptible(&khugepaged_wait);
3246 set_recommended_min_free_kbytes();
3247 } else if (khugepaged_thread) {
3248 kthread_stop(khugepaged_thread);
3249 khugepaged_thread = NULL;
3252 mutex_unlock(&khugepaged_mutex);
3256 void khugepaged_min_free_kbytes_update(void)
3258 mutex_lock(&khugepaged_mutex);
3259 if (khugepaged_enabled() && khugepaged_thread)
3260 set_recommended_min_free_kbytes();
3261 mutex_unlock(&khugepaged_mutex);