thp, khugepaged: skip retracting page table if a 64KB hugepage mapping is already...
[platform/kernel/linux-rpi.git] / mm / khugepaged.c
1 // SPDX-License-Identifier: GPL-2.0
2 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
3
4 #include <linux/mm.h>
5 #include <linux/sched.h>
6 #include <linux/sched/mm.h>
7 #include <linux/sched/coredump.h>
8 #include <linux/mmu_notifier.h>
9 #include <linux/rmap.h>
10 #include <linux/swap.h>
11 #include <linux/mm_inline.h>
12 #include <linux/kthread.h>
13 #include <linux/khugepaged.h>
14 #include <linux/freezer.h>
15 #include <linux/mman.h>
16 #include <linux/hashtable.h>
17 #include <linux/userfaultfd_k.h>
18 #include <linux/page_idle.h>
19 #include <linux/swapops.h>
20 #include <linux/shmem_fs.h>
21
22 #include <asm/tlb.h>
23 #include <asm/pgalloc.h>
24 #ifdef CONFIG_FINEGRAINED_THP
25 #include <asm/finegrained_thp.h>
26 #include <asm/huge_mm.h>
27 #else
28 #include <asm-generic/finegrained_thp.h>
29 #include <asm-generic/huge_mm.h>
30 #endif
31 #include "internal.h"
32
33 enum scan_result {
34         SCAN_FAIL,
35         SCAN_SUCCEED,
36         SCAN_PMD_NULL,
37         SCAN_EXCEED_NONE_PTE,
38         SCAN_EXCEED_SWAP_PTE,
39         SCAN_EXCEED_SHARED_PTE,
40         SCAN_PTE_NON_PRESENT,
41         SCAN_PTE_UFFD_WP,
42         SCAN_PAGE_RO,
43         SCAN_LACK_REFERENCED_PAGE,
44         SCAN_PAGE_NULL,
45         SCAN_SCAN_ABORT,
46         SCAN_PAGE_COUNT,
47         SCAN_PAGE_LRU,
48         SCAN_PAGE_LOCK,
49         SCAN_PAGE_ANON,
50         SCAN_PAGE_COMPOUND,
51         SCAN_ANY_PROCESS,
52         SCAN_VMA_NULL,
53         SCAN_VMA_CHECK,
54         SCAN_ADDRESS_RANGE,
55         SCAN_SWAP_CACHE_PAGE,
56         SCAN_DEL_PAGE_LRU,
57         SCAN_ALLOC_HUGE_PAGE_FAIL,
58         SCAN_CGROUP_CHARGE_FAIL,
59         SCAN_TRUNCATED,
60         SCAN_PAGE_HAS_PRIVATE,
61 };
62
63 #define CREATE_TRACE_POINTS
64 #include <trace/events/huge_memory.h>
65
66 static struct task_struct *khugepaged_thread __read_mostly;
67 static DEFINE_MUTEX(khugepaged_mutex);
68
69 /* default: scan 8*512 ptes (or vmas) every 10 seconds */
70 static unsigned int khugepaged_pages_to_scan __read_mostly;
71 static unsigned int khugepaged_pages_collapsed;
72 static unsigned int khugepaged_full_scans;
73 static unsigned int khugepaged_scan_sleep_millisecs __read_mostly = 10000;
74 /* during fragmentation poll the hugepage allocator once every minute */
75 static unsigned int khugepaged_alloc_sleep_millisecs __read_mostly = 60000;
76 static unsigned long khugepaged_sleep_expire;
77 static DEFINE_SPINLOCK(khugepaged_mm_lock);
78 static DECLARE_WAIT_QUEUE_HEAD(khugepaged_wait);
79 /*
80  * By default, collapse hugepages if there is at least one pte mapped as
81  * it would have been mapped had the vma been large enough during the
82  * page fault.
83  */
84 static unsigned int khugepaged_max_ptes_none __read_mostly;
85 static unsigned int khugepaged_max_ptes_swap __read_mostly;
86 static unsigned int khugepaged_max_ptes_shared __read_mostly;
87
88 #ifdef CONFIG_FINEGRAINED_THP
89 /*
90  * thp_scan_hint:
91  * used to provide khugepaged with hints about
92  * which address space has changed recently.
93  */
94 struct thp_scan_hint {
95         struct mm_slot *slot;
96         struct vm_area_struct *vma;
97         unsigned long diff;             /* memory difference */
98         unsigned long jiffies;          /* time stamp for profiling purpose */
99         struct list_head hint_list;
100 };
101
102 /* THP type descriptor */
103 enum {
104         THP_TYPE_FAIL,  /* cannot make hugepage */
105         THP_TYPE_64KB,  /* 64KB hugepage can be made, use CONT_PTE */
106         THP_TYPE_2MB,   /* 2MB hugepage can be made, use PMD */
107 };
108
109 static unsigned int khugepaged_max_ptes_none_64kb __read_mostly;
110 static unsigned int khugepaged_max_ptes_swap_64kb __read_mostly;
111 static unsigned int khugepaged_max_ptes_shared_64kb __read_mostly;
112 #endif /* CONFIG_FINEGRAINED_THP */
113
114 #define MM_SLOTS_HASH_BITS 10
115 static __read_mostly DEFINE_HASHTABLE(mm_slots_hash, MM_SLOTS_HASH_BITS);
116
117 static struct kmem_cache *mm_slot_cache __read_mostly;
118
119 #define MAX_PTE_MAPPED_THP 8
120
121 /**
122  * struct mm_slot - hash lookup from mm to mm_slot
123  * @hash: hash collision list
124  * @mm_node: khugepaged scan list headed in khugepaged_scan.mm_head
125  * @mm: the mm that this information is valid for
126  */
127 struct mm_slot {
128         struct hlist_node hash;
129         struct list_head mm_node;
130         struct mm_struct *mm;
131
132         /* pte-mapped THP in this mm */
133         int nr_pte_mapped_thp;
134         unsigned long pte_mapped_thp[MAX_PTE_MAPPED_THP];
135 };
136
137 /**
138  * struct khugepaged_scan - cursor for scanning
139  * @mm_head: the head of the mm list to scan
140  * @mm_slot: the current mm_slot we are scanning
141  * @address: the next address inside that to be scanned
142  *
143  * There is only the one khugepaged_scan instance of this cursor structure.
144  */
145 struct khugepaged_scan {
146         struct list_head mm_head;
147         struct mm_slot *mm_slot;
148         unsigned long address;
149 #ifdef CONFIG_FINEGRAINED_THP
150         int hpage_type;
151         int nr_hint;
152         struct list_head hint_list;
153 #endif /* CONFIG_FINEGRAINED_THP */
154 };
155
156 static struct khugepaged_scan khugepaged_scan = {
157         .mm_head = LIST_HEAD_INIT(khugepaged_scan.mm_head),
158 #ifdef CONFIG_FINEGRAINED_THP
159         .hint_list = LIST_HEAD_INIT(khugepaged_scan.hint_list),
160 #endif
161 };
162
163 #ifdef CONFIG_SYSFS
164 static ssize_t scan_sleep_millisecs_show(struct kobject *kobj,
165                                          struct kobj_attribute *attr,
166                                          char *buf)
167 {
168         return sprintf(buf, "%u\n", khugepaged_scan_sleep_millisecs);
169 }
170
171 static ssize_t scan_sleep_millisecs_store(struct kobject *kobj,
172                                           struct kobj_attribute *attr,
173                                           const char *buf, size_t count)
174 {
175         unsigned long msecs;
176         int err;
177
178         err = kstrtoul(buf, 10, &msecs);
179         if (err || msecs > UINT_MAX)
180                 return -EINVAL;
181
182         khugepaged_scan_sleep_millisecs = msecs;
183         khugepaged_sleep_expire = 0;
184         wake_up_interruptible(&khugepaged_wait);
185
186         return count;
187 }
188 static struct kobj_attribute scan_sleep_millisecs_attr =
189         __ATTR(scan_sleep_millisecs, 0644, scan_sleep_millisecs_show,
190                scan_sleep_millisecs_store);
191
192 static ssize_t alloc_sleep_millisecs_show(struct kobject *kobj,
193                                           struct kobj_attribute *attr,
194                                           char *buf)
195 {
196         return sprintf(buf, "%u\n", khugepaged_alloc_sleep_millisecs);
197 }
198
199 static ssize_t alloc_sleep_millisecs_store(struct kobject *kobj,
200                                            struct kobj_attribute *attr,
201                                            const char *buf, size_t count)
202 {
203         unsigned long msecs;
204         int err;
205
206         err = kstrtoul(buf, 10, &msecs);
207         if (err || msecs > UINT_MAX)
208                 return -EINVAL;
209
210         khugepaged_alloc_sleep_millisecs = msecs;
211         khugepaged_sleep_expire = 0;
212         wake_up_interruptible(&khugepaged_wait);
213
214         return count;
215 }
216 static struct kobj_attribute alloc_sleep_millisecs_attr =
217         __ATTR(alloc_sleep_millisecs, 0644, alloc_sleep_millisecs_show,
218                alloc_sleep_millisecs_store);
219
220 static ssize_t pages_to_scan_show(struct kobject *kobj,
221                                   struct kobj_attribute *attr,
222                                   char *buf)
223 {
224         return sprintf(buf, "%u\n", khugepaged_pages_to_scan);
225 }
226 static ssize_t pages_to_scan_store(struct kobject *kobj,
227                                    struct kobj_attribute *attr,
228                                    const char *buf, size_t count)
229 {
230         int err;
231         unsigned long pages;
232
233         err = kstrtoul(buf, 10, &pages);
234         if (err || !pages || pages > UINT_MAX)
235                 return -EINVAL;
236
237         khugepaged_pages_to_scan = pages;
238
239         return count;
240 }
241 static struct kobj_attribute pages_to_scan_attr =
242         __ATTR(pages_to_scan, 0644, pages_to_scan_show,
243                pages_to_scan_store);
244
245 static ssize_t pages_collapsed_show(struct kobject *kobj,
246                                     struct kobj_attribute *attr,
247                                     char *buf)
248 {
249         return sprintf(buf, "%u\n", khugepaged_pages_collapsed);
250 }
251 static struct kobj_attribute pages_collapsed_attr =
252         __ATTR_RO(pages_collapsed);
253
254 static ssize_t full_scans_show(struct kobject *kobj,
255                                struct kobj_attribute *attr,
256                                char *buf)
257 {
258         return sprintf(buf, "%u\n", khugepaged_full_scans);
259 }
260 static struct kobj_attribute full_scans_attr =
261         __ATTR_RO(full_scans);
262
263 static ssize_t khugepaged_defrag_show(struct kobject *kobj,
264                                       struct kobj_attribute *attr, char *buf)
265 {
266         return single_hugepage_flag_show(kobj, attr, buf,
267                                 TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
268 }
269 static ssize_t khugepaged_defrag_store(struct kobject *kobj,
270                                        struct kobj_attribute *attr,
271                                        const char *buf, size_t count)
272 {
273         return single_hugepage_flag_store(kobj, attr, buf, count,
274                                  TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
275 }
276 static struct kobj_attribute khugepaged_defrag_attr =
277         __ATTR(defrag, 0644, khugepaged_defrag_show,
278                khugepaged_defrag_store);
279
280 /*
281  * max_ptes_none controls whether khugepaged should collapse hugepages
282  * over unmapped ptes, which in turn can increase the memory footprint
283  * of the vmas. When max_ptes_none is 0, khugepaged will not reduce the
284  * available free memory in the system as it runs. Increasing
285  * max_ptes_none will instead potentially reduce the free memory in the
286  * system during the khugepaged scan.
287  */
288 static ssize_t khugepaged_max_ptes_none_show(struct kobject *kobj,
289                                              struct kobj_attribute *attr,
290                                              char *buf)
291 {
292         return sprintf(buf, "%u\n", khugepaged_max_ptes_none);
293 }
294 static ssize_t khugepaged_max_ptes_none_store(struct kobject *kobj,
295                                               struct kobj_attribute *attr,
296                                               const char *buf, size_t count)
297 {
298         int err;
299         unsigned long max_ptes_none;
300
301         err = kstrtoul(buf, 10, &max_ptes_none);
302         if (err || max_ptes_none > HPAGE_PMD_NR-1)
303                 return -EINVAL;
304
305         khugepaged_max_ptes_none = max_ptes_none;
306
307         return count;
308 }
309 static struct kobj_attribute khugepaged_max_ptes_none_attr =
310         __ATTR(max_ptes_none, 0644, khugepaged_max_ptes_none_show,
311                khugepaged_max_ptes_none_store);
312
313 static ssize_t khugepaged_max_ptes_swap_show(struct kobject *kobj,
314                                              struct kobj_attribute *attr,
315                                              char *buf)
316 {
317         return sprintf(buf, "%u\n", khugepaged_max_ptes_swap);
318 }
319
320 static ssize_t khugepaged_max_ptes_swap_store(struct kobject *kobj,
321                                               struct kobj_attribute *attr,
322                                               const char *buf, size_t count)
323 {
324         int err;
325         unsigned long max_ptes_swap;
326
327         err  = kstrtoul(buf, 10, &max_ptes_swap);
328         if (err || max_ptes_swap > HPAGE_PMD_NR-1)
329                 return -EINVAL;
330
331         khugepaged_max_ptes_swap = max_ptes_swap;
332
333         return count;
334 }
335
336 static struct kobj_attribute khugepaged_max_ptes_swap_attr =
337         __ATTR(max_ptes_swap, 0644, khugepaged_max_ptes_swap_show,
338                khugepaged_max_ptes_swap_store);
339
340 static ssize_t khugepaged_max_ptes_shared_show(struct kobject *kobj,
341                                              struct kobj_attribute *attr,
342                                              char *buf)
343 {
344         return sprintf(buf, "%u\n", khugepaged_max_ptes_shared);
345 }
346
347 static ssize_t khugepaged_max_ptes_shared_store(struct kobject *kobj,
348                                               struct kobj_attribute *attr,
349                                               const char *buf, size_t count)
350 {
351         int err;
352         unsigned long max_ptes_shared;
353
354         err  = kstrtoul(buf, 10, &max_ptes_shared);
355         if (err || max_ptes_shared > HPAGE_PMD_NR-1)
356                 return -EINVAL;
357
358         khugepaged_max_ptes_shared = max_ptes_shared;
359
360         return count;
361 }
362
363 static struct kobj_attribute khugepaged_max_ptes_shared_attr =
364         __ATTR(max_ptes_shared, 0644, khugepaged_max_ptes_shared_show,
365                khugepaged_max_ptes_shared_store);
366
367 static struct attribute *khugepaged_attr[] = {
368         &khugepaged_defrag_attr.attr,
369         &khugepaged_max_ptes_none_attr.attr,
370         &khugepaged_max_ptes_swap_attr.attr,
371         &khugepaged_max_ptes_shared_attr.attr,
372         &pages_to_scan_attr.attr,
373         &pages_collapsed_attr.attr,
374         &full_scans_attr.attr,
375         &scan_sleep_millisecs_attr.attr,
376         &alloc_sleep_millisecs_attr.attr,
377         NULL,
378 };
379
380 struct attribute_group khugepaged_attr_group = {
381         .attrs = khugepaged_attr,
382         .name = "khugepaged",
383 };
384 #endif /* CONFIG_SYSFS */
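
When CONFIG_SYSFS is enabled, the attribute group above appears under
/sys/kernel/mm/transparent_hugepage/khugepaged/. Below is a minimal user-space
sketch, not part of khugepaged.c and assuming that standard sysfs location,
which reads and then tightens the max_ptes_none tunable; the store handler
above rejects values larger than HPAGE_PMD_NR - 1, and the write needs root.

/* User-space sketch (not kernel code): read and update max_ptes_none. */
#include <stdio.h>
#include <stdlib.h>

#define MAX_PTES_NONE_PATH \
        "/sys/kernel/mm/transparent_hugepage/khugepaged/max_ptes_none"

int main(void)
{
        unsigned int val;
        FILE *f = fopen(MAX_PTES_NONE_PATH, "r");

        if (!f || fscanf(f, "%u", &val) != 1) {
                perror("read max_ptes_none");
                return EXIT_FAILURE;
        }
        fclose(f);
        printf("current max_ptes_none: %u\n", val);

        /* 0 means khugepaged never collapses over unmapped ptes */
        f = fopen(MAX_PTES_NONE_PATH, "w");
        if (!f || fprintf(f, "0\n") < 0) {
                perror("write max_ptes_none");
                return EXIT_FAILURE;
        }
        fclose(f);
        return EXIT_SUCCESS;
}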
385
386 int hugepage_madvise(struct vm_area_struct *vma,
387                      unsigned long *vm_flags, int advice)
388 {
389         switch (advice) {
390         case MADV_HUGEPAGE:
391 #ifdef CONFIG_S390
392                 /*
393                  * qemu blindly sets MADV_HUGEPAGE on all allocations, but s390
394                  * can't handle this properly after s390_enable_sie, so we simply
395                  * ignore the madvise to prevent qemu from causing a SIGSEGV.
396                  */
397                 if (mm_has_pgste(vma->vm_mm))
398                         return 0;
399 #endif
400                 *vm_flags &= ~VM_NOHUGEPAGE;
401                 *vm_flags |= VM_HUGEPAGE;
402                 /*
403                  * If the vma becomes suitable for khugepaged to scan,
404                  * register it here without waiting for a page fault that
405                  * may not happen any time soon.
406                  */
407                 if (!(*vm_flags & VM_NO_KHUGEPAGED) &&
408                                 khugepaged_enter_vma_merge(vma, *vm_flags))
409                         return -ENOMEM;
410                 break;
411         case MADV_NOHUGEPAGE:
412                 *vm_flags &= ~VM_HUGEPAGE;
413                 *vm_flags |= VM_NOHUGEPAGE;
414                 /*
415                  * Setting VM_NOHUGEPAGE will prevent khugepaged from scanning
416                  * this vma, even if the mm stays registered in khugepaged because
417                  * it was registered before VM_NOHUGEPAGE was set.
418                  */
419                 break;
420         }
421
422         return 0;
423 }
424
425 int __init khugepaged_init(void)
426 {
427         mm_slot_cache = kmem_cache_create("khugepaged_mm_slot",
428                                           sizeof(struct mm_slot),
429                                           __alignof__(struct mm_slot), 0, NULL);
430         if (!mm_slot_cache)
431                 return -ENOMEM;
432
433         khugepaged_pages_to_scan = HPAGE_PMD_NR * 8;
434         khugepaged_max_ptes_none = HPAGE_PMD_NR - 1;
435         khugepaged_max_ptes_swap = HPAGE_PMD_NR / 8;
436         khugepaged_max_ptes_shared = HPAGE_PMD_NR / 2;
437
438 #ifdef CONFIG_FINEGRAINED_THP
439         khugepaged_max_ptes_none_64kb = HPAGE_CONT_PTE_NR - 1;
440         khugepaged_max_ptes_swap_64kb = HPAGE_CONT_PTE_NR / 8;
441         khugepaged_max_ptes_shared_64kb = HPAGE_CONT_PTE_NR / 2;
442 #endif
443         return 0;
444 }
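
The defaults above scale with the hugepage geometry. A stand-alone sketch of
the resulting numbers, assuming 4KB base pages (so HPAGE_PMD_NR is 512) and a
64KB CONT_PTE hugepage of 16 base pages for the CONFIG_FINEGRAINED_THP case:

/* Illustrative user-space sketch, not kernel code. */
#include <stdio.h>

int main(void)
{
        const unsigned int hpage_pmd_nr = 512;          /* 2MB / 4KB */
        const unsigned int hpage_cont_pte_nr = 16;      /* 64KB / 4KB */

        /* prints: pages_to_scan=4096 max_ptes_none=511 max_ptes_swap=64 max_ptes_shared=256 */
        printf("pages_to_scan=%u max_ptes_none=%u max_ptes_swap=%u max_ptes_shared=%u\n",
               hpage_pmd_nr * 8, hpage_pmd_nr - 1,
               hpage_pmd_nr / 8, hpage_pmd_nr / 2);
        /* prints: 64kb: max_ptes_none=15 max_ptes_swap=2 max_ptes_shared=8 */
        printf("64kb: max_ptes_none=%u max_ptes_swap=%u max_ptes_shared=%u\n",
               hpage_cont_pte_nr - 1, hpage_cont_pte_nr / 8,
               hpage_cont_pte_nr / 2);
        return 0;
}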
445
446 void __init khugepaged_destroy(void)
447 {
448         kmem_cache_destroy(mm_slot_cache);
449 }
450
451 static inline struct mm_slot *alloc_mm_slot(void)
452 {
453         if (!mm_slot_cache)     /* initialization failed */
454                 return NULL;
455         return kmem_cache_zalloc(mm_slot_cache, GFP_KERNEL);
456 }
457
458 static inline void free_mm_slot(struct mm_slot *mm_slot)
459 {
460         kmem_cache_free(mm_slot_cache, mm_slot);
461 }
462
463 static struct mm_slot *get_mm_slot(struct mm_struct *mm)
464 {
465         struct mm_slot *mm_slot;
466
467         hash_for_each_possible(mm_slots_hash, mm_slot, hash, (unsigned long)mm)
468                 if (mm == mm_slot->mm)
469                         return mm_slot;
470
471         return NULL;
472 }
473
474 static void insert_to_mm_slots_hash(struct mm_struct *mm,
475                                     struct mm_slot *mm_slot)
476 {
477         mm_slot->mm = mm;
478         hash_add(mm_slots_hash, &mm_slot->hash, (long)mm);
479 }
480
481 static inline int khugepaged_test_exit(struct mm_struct *mm)
482 {
483         return atomic_read(&mm->mm_users) == 0;
484 }
485
486 #ifdef CONFIG_FINEGRAINED_THP
487 static void clear_hint_list(struct mm_slot *slot);
488 #endif /* CONFIG_FINEGRAINED_THP */
489
490 static bool hugepage_vma_check(struct vm_area_struct *vma,
491                                unsigned long vm_flags)
492 {
493         /* Explicitly disabled through madvise. */
494         if ((vm_flags & VM_NOHUGEPAGE) ||
495             test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags))
496                 return false;
497
498         /* Check arch-dependent shmem hugepage availability */
499         if (arch_hugepage_vma_shmem_check(vma, vm_flags))
500                 return true;
501         /* Enabled via shmem mount options or sysfs settings. */
502         else if (shmem_file(vma->vm_file) && shmem_huge_enabled(vma)) {
503                 return IS_ALIGNED((vma->vm_start >> PAGE_SHIFT) - vma->vm_pgoff,
504                                 HPAGE_PMD_NR);
505         }
506
507         /* THP settings require madvise. */
508         if (!(vm_flags & VM_HUGEPAGE) && !khugepaged_always())
509                 return false;
510
511         /* Check arch-dependent file hugepage availability */
512         if (arch_hugepage_vma_file_check(vma, vm_flags))
513                 return true;
514         /* Read-only file mappings need to be aligned for THP to work. */
515         else if (IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS) && vma->vm_file &&
516             (vm_flags & VM_DENYWRITE)) {
517                 return IS_ALIGNED((vma->vm_start >> PAGE_SHIFT) - vma->vm_pgoff,
518                                 HPAGE_PMD_NR);
519         }
520
521         if (!vma->anon_vma || vma->vm_ops)
522                 return false;
523         if (vma_is_temporary_stack(vma))
524                 return false;
525         return !(vm_flags & VM_NO_KHUGEPAGED);
526 }
527
528 int __khugepaged_enter(struct mm_struct *mm)
529 {
530         struct mm_slot *mm_slot;
531         int wakeup;
532
533         mm_slot = alloc_mm_slot();
534         if (!mm_slot)
535                 return -ENOMEM;
536
537         /* __khugepaged_exit() must not run from under us */
538         VM_BUG_ON_MM(atomic_read(&mm->mm_users) == 0, mm);
539         if (unlikely(test_and_set_bit(MMF_VM_HUGEPAGE, &mm->flags))) {
540                 free_mm_slot(mm_slot);
541                 return 0;
542         }
543
544         spin_lock(&khugepaged_mm_lock);
545         insert_to_mm_slots_hash(mm, mm_slot);
546         /*
547          * Insert just behind the scanning cursor, to let the area settle
548          * down a little.
549          */
550         wakeup = list_empty(&khugepaged_scan.mm_head);
551         list_add_tail(&mm_slot->mm_node, &khugepaged_scan.mm_head);
552         spin_unlock(&khugepaged_mm_lock);
553
554         mmgrab(mm);
555         if (wakeup)
556                 wake_up_interruptible(&khugepaged_wait);
557
558         return 0;
559 }
560
561 int khugepaged_enter_vma_merge(struct vm_area_struct *vma,
562                                unsigned long vm_flags)
563 {
564         unsigned long hstart, hend;
565
566         /*
567          * For non-shmem files, khugepaged only supports read-only mappings.
568          * khugepaged does not yet work on special mappings, and
569          * file-private shmem THP is not supported.
570          */
571         if (!hugepage_vma_check(vma, vm_flags))
572                 return 0;
573
574         hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
575         hend = vma->vm_end & HPAGE_PMD_MASK;
576         if (hstart < hend)
577                 return khugepaged_enter(vma, vm_flags);
578 #ifdef CONFIG_FINEGRAINED_THP
579         hstart = (vma->vm_start + ~HPAGE_CONT_PTE_MASK) & HPAGE_CONT_PTE_MASK;
580         hend = vma->vm_end & HPAGE_CONT_PTE_MASK;
581         if (hstart < hend)
582                 return khugepaged_enter(vma, vm_flags);
583 #endif /* CONFIG_FINEGRAINED_THP */
584         return 0;
585 }
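
The hstart/hend arithmetic above rounds the VMA start up and the VMA end down
to a hugepage boundary, and registers the VMA only if at least one aligned
hugepage fits in between. A small worked sketch with hypothetical addresses,
assuming 4KB base pages and 2MB PMD hugepages:

/* Worked sketch (not kernel code): hstart/hend rounding for one VMA. */
#include <stdio.h>

#define HPAGE_PMD_SIZE (2UL << 20)
#define HPAGE_PMD_MASK (~(HPAGE_PMD_SIZE - 1))

int main(void)
{
        unsigned long vm_start = 0x00601000;    /* hypothetical, not 2MB aligned */
        unsigned long vm_end   = 0x00a00000;    /* hypothetical */

        unsigned long hstart = (vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
        unsigned long hend   = vm_end & HPAGE_PMD_MASK;

        /* hstart rounds up to 0x00800000, hend rounds down to 0x00a00000;
         * hstart < hend, so one 2MB hugepage fits and the VMA is worth
         * registering with khugepaged. */
        printf("hstart=%#lx hend=%#lx fits=%d\n",
               hstart, hend, hstart < hend);
        return 0;
}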
586
587 void __khugepaged_exit(struct mm_struct *mm)
588 {
589         struct mm_slot *mm_slot;
590         int free = 0;
591
592         spin_lock(&khugepaged_mm_lock);
593         mm_slot = get_mm_slot(mm);
594         if (mm_slot && khugepaged_scan.mm_slot != mm_slot) {
595 #ifdef CONFIG_FINEGRAINED_THP
596                 clear_hint_list(mm_slot);
597 #endif
598                 hash_del(&mm_slot->hash);
599                 list_del(&mm_slot->mm_node);
600                 free = 1;
601         }
602         spin_unlock(&khugepaged_mm_lock);
603
604         if (free) {
605                 clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
606                 free_mm_slot(mm_slot);
607                 mmdrop(mm);
608         } else if (mm_slot) {
609                 /*
610                  * This is required to serialize against
611                  * khugepaged_test_exit() (which is guaranteed to run
612                  * under mmap sem read mode). Stop here (after we
613                  * return, all pagetables will be destroyed) until
614                  * khugepaged has finished working on the pagetables
615                  * under the mmap_lock.
616                  */
617                 mmap_write_lock(mm);
618                 mmap_write_unlock(mm);
619         }
620 }
621
622 static void release_pte_page(struct page *page)
623 {
624         mod_node_page_state(page_pgdat(page),
625                         NR_ISOLATED_ANON + page_is_file_lru(page),
626                         -compound_nr(page));
627         unlock_page(page);
628         putback_lru_page(page);
629 }
630
631 static void release_pte_pages(pte_t *pte, pte_t *_pte,
632                 struct list_head *compound_pagelist)
633 {
634         struct page *page, *tmp;
635
636         while (--_pte >= pte) {
637                 pte_t pteval = *_pte;
638
639                 page = pte_page(pteval);
640                 if (!pte_none(pteval) && !is_zero_pfn(pte_pfn(pteval)) &&
641                                 !PageCompound(page))
642                         release_pte_page(page);
643         }
644
645         list_for_each_entry_safe(page, tmp, compound_pagelist, lru) {
646                 list_del(&page->lru);
647                 release_pte_page(page);
648         }
649 }
650
651 static bool is_refcount_suitable(struct page *page)
652 {
653         int expected_refcount;
654
655         expected_refcount = total_mapcount(page);
656         if (PageSwapCache(page))
657                 expected_refcount += compound_nr(page);
658
659         return page_count(page) == expected_refcount;
660 }
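
is_refcount_suitable() accepts a page only when its refcount is fully
explained by its mappings, plus one reference per subpage if the page sits in
the swap cache; any surplus indicates a GUP or other external pin. An
illustrative user-space sketch of that accounting, using hypothetical counts:

/* Illustrative sketch (not kernel code): the pin test with made-up numbers. */
#include <stdbool.h>
#include <stdio.h>

static bool refcount_suitable(int page_count, int total_mapcount,
                              bool swapcache, int compound_nr)
{
        int expected = total_mapcount + (swapcache ? compound_nr : 0);

        return page_count == expected;
}

int main(void)
{
        /* base page mapped in two processes, not in swap cache, no extra pin */
        printf("%d\n", refcount_suitable(2, 2, false, 1));     /* 1: collapsible */
        /* same page with one extra GUP pin */
        printf("%d\n", refcount_suitable(3, 2, false, 1));     /* 0: rejected */
        return 0;
}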
661
662 #ifdef CONFIG_FINEGRAINED_THP
663 static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
664                                         unsigned long address,
665                                         pte_t *pte,
666                                         struct list_head *compound_pagelist,
667                                         int hpage_type)
668 #else /* CONFIG_FINEGRAINED_THP */
669 static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
670                                         unsigned long address,
671                                         pte_t *pte,
672                                         struct list_head *compound_pagelist)
673 #endif /* CONFIG_FINEGRAINED_THP */
674 {
675         struct page *page = NULL;
676         pte_t *_pte;
677         int none_or_zero = 0, shared = 0, result = 0, referenced = 0;
678         bool writable = false;
679 #ifdef CONFIG_FINEGRAINED_THP
680         int max_ptes_shared, max_ptes_none;
681         int hpage_nr;
682
683         if (hpage_type == THP_TYPE_64KB) {
684                 hpage_nr = HPAGE_CONT_PTE_NR;
685                 max_ptes_shared = khugepaged_max_ptes_shared_64kb;
686                 max_ptes_none = khugepaged_max_ptes_none_64kb;
687         } else {
688                 hpage_nr = HPAGE_PMD_NR;
689                 max_ptes_shared = khugepaged_max_ptes_shared;
690                 max_ptes_none = khugepaged_max_ptes_none;
691         }
692 #endif /* CONFIG_FINEGRAINED_THP */
693
694         for (_pte = pte;
695 #ifdef CONFIG_FINEGRAINED_THP
696                 _pte < pte + hpage_nr;
697 #else
698                 _pte < pte+HPAGE_PMD_NR;
699 #endif
700              _pte++, address += PAGE_SIZE) {
701                 pte_t pteval = *_pte;
702                 if (pte_none(pteval) || (pte_present(pteval) &&
703                                 is_zero_pfn(pte_pfn(pteval)))) {
704 #ifdef CONFIG_FINEGRAINED_THP
705                         if (!userfaultfd_armed(vma) &&
706                             ++none_or_zero <= max_ptes_none)
707 #else /* CONFIG_FINEGRAINED_THP */
708                         if (!userfaultfd_armed(vma) &&
709                             ++none_or_zero <= khugepaged_max_ptes_none)
710 #endif /* CONFIG_FINEGRAINED_THP */
711                         {
712                                 continue;
713                         } else {
714                                 result = SCAN_EXCEED_NONE_PTE;
715                                 goto out;
716                         }
717                 }
718                 if (!pte_present(pteval)) {
719                         result = SCAN_PTE_NON_PRESENT;
720                         goto out;
721                 }
722                 page = vm_normal_page(vma, address, pteval);
723                 if (unlikely(!page)) {
724                         result = SCAN_PAGE_NULL;
725                         goto out;
726                 }
727
728                 VM_BUG_ON_PAGE(!PageAnon(page), page);
729
730 #ifdef CONFIG_FINEGRAINED_THP
731                 if (page_mapcount(page) > 1 &&
732                                 ++shared > max_ptes_shared)
733 #else /* CONFIG_FINEGRAINED_THP */
734                 if (page_mapcount(page) > 1 &&
735                                 ++shared > khugepaged_max_ptes_shared)
736 #endif /* CONFIG_FINEGRAINED_THP */
737                 {
738                         result = SCAN_EXCEED_SHARED_PTE;
739                         goto out;
740                 }
741
742                 if (PageCompound(page)) {
743                         struct page *p;
744                         page = compound_head(page);
745
746                         /*
747                          * Check if we have dealt with the compound page
748                          * already
749                          */
750                         list_for_each_entry(p, compound_pagelist, lru) {
751                                 if (page == p)
752                                         goto next;
753                         }
754                 }
755
756                 /*
757                  * We can do it before isolate_lru_page because the
758                  * page can't be freed from under us. NOTE: PG_lock
759                  * is needed to serialize against split_huge_page
760                  * when invoked from the VM.
761                  */
762                 if (!trylock_page(page)) {
763                         result = SCAN_PAGE_LOCK;
764                         goto out;
765                 }
766
767                 /*
768                  * Check if the page has any GUP (or other external) pins.
769                  *
770                  * The page table that maps the page has already been unlinked
771                  * from the page table tree and this process cannot get
772                  * an additional pin on the page.
773                  *
774                  * New pins can come later if the page is shared across fork,
775                  * but not from this process. The other process cannot write to
776                  * the page, only trigger CoW.
777                  */
778                 if (!is_refcount_suitable(page)) {
779                         unlock_page(page);
780                         result = SCAN_PAGE_COUNT;
781                         goto out;
782                 }
783                 if (!pte_write(pteval) && PageSwapCache(page) &&
784                                 !reuse_swap_page(page, NULL)) {
785                         /*
786                          * Page is in the swap cache and cannot be re-used.
787                          * It cannot be collapsed into a THP.
788                          */
789                         unlock_page(page);
790                         result = SCAN_SWAP_CACHE_PAGE;
791                         goto out;
792                 }
793
794                 /*
795                  * Isolate the page to avoid collapsing a hugepage
796                  * currently in use by the VM.
797                  */
798                 if (isolate_lru_page(page)) {
799                         unlock_page(page);
800                         result = SCAN_DEL_PAGE_LRU;
801                         goto out;
802                 }
803                 mod_node_page_state(page_pgdat(page),
804                                 NR_ISOLATED_ANON + page_is_file_lru(page),
805                                 compound_nr(page));
806                 VM_BUG_ON_PAGE(!PageLocked(page), page);
807                 VM_BUG_ON_PAGE(PageLRU(page), page);
808
809                 if (PageCompound(page))
810                         list_add_tail(&page->lru, compound_pagelist);
811 next:
812                 /* There should be enough young pte to collapse the page */
813                 if (pte_young(pteval) ||
814                     page_is_young(page) || PageReferenced(page) ||
815                     mmu_notifier_test_young(vma->vm_mm, address))
816                         referenced++;
817
818                 if (pte_write(pteval))
819                         writable = true;
820         }
821         if (likely(writable)) {
822                 if (likely(referenced)) {
823                         result = SCAN_SUCCEED;
824                         trace_mm_collapse_huge_page_isolate(page, none_or_zero,
825                                                             referenced, writable, result);
826                         return 1;
827                 }
828         } else {
829                 result = SCAN_PAGE_RO;
830         }
831
832 out:
833         release_pte_pages(pte, _pte, compound_pagelist);
834         trace_mm_collapse_huge_page_isolate(page, none_or_zero,
835                                             referenced, writable, result);
836         return 0;
837 }
838
839 #ifdef CONFIG_FINEGRAINED_THP
840 static void __collapse_huge_page_copy(pte_t *pte, struct page *page,
841                                       struct vm_area_struct *vma,
842                                       unsigned long address,
843                                       spinlock_t *ptl,
844                                       struct list_head *compound_pagelist,
845                                       int hpage_type)
846 #else /* CONFIG_FINEGRAINED_THP */
847 static void __collapse_huge_page_copy(pte_t *pte, struct page *page,
848                                       struct vm_area_struct *vma,
849                                       unsigned long address,
850                                       spinlock_t *ptl,
851                                       struct list_head *compound_pagelist)
852 #endif /* CONFIG_FINEGRAINED_THP */
853 {
854         struct page *src_page, *tmp;
855         pte_t *_pte;
856 #ifdef CONFIG_FINEGRAINED_THP
857         int hpage_nr = (hpage_type == THP_TYPE_64KB ?
858                                         HPAGE_CONT_PTE_NR : HPAGE_PMD_NR);
859 #endif
860
861         for (_pte = pte;
862 #ifdef CONFIG_FINEGRAINED_THP
863                                 _pte < pte + hpage_nr;
864 #else
865                                 _pte < pte + HPAGE_PMD_NR;
866 #endif
867                                 _pte++, page++, address += PAGE_SIZE) {
868                 pte_t pteval = *_pte;
869
870                 if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) {
871                         clear_user_highpage(page, address);
872                         add_mm_counter(vma->vm_mm, MM_ANONPAGES, 1);
873                         if (is_zero_pfn(pte_pfn(pteval))) {
874                                 /*
875                                  * ptl mostly unnecessary.
876                                  */
877                                 spin_lock(ptl);
878                                 /*
879                                  * paravirt calls inside pte_clear here are
880                                  * superfluous.
881                                  */
882                                 pte_clear(vma->vm_mm, address, _pte);
883                                 spin_unlock(ptl);
884                         }
885                 } else {
886                         src_page = pte_page(pteval);
887                         copy_user_highpage(page, src_page, address, vma);
888                         if (!PageCompound(src_page))
889                                 release_pte_page(src_page);
890                         /*
891                          * ptl mostly unnecessary, but preempt has to
892                          * be disabled to update the per-cpu stats
893                          * inside page_remove_rmap().
894                          */
895                         spin_lock(ptl);
896                         /*
897                          * paravirt calls inside pte_clear here are
898                          * superfluous.
899                          */
900                         pte_clear(vma->vm_mm, address, _pte);
901                         page_remove_rmap(src_page, false);
902                         spin_unlock(ptl);
903                         free_page_and_swap_cache(src_page);
904                 }
905         }
906
907         list_for_each_entry_safe(src_page, tmp, compound_pagelist, lru) {
908                 list_del(&src_page->lru);
909                 release_pte_page(src_page);
910         }
911 }
912
913 static void khugepaged_alloc_sleep(void)
914 {
915         DEFINE_WAIT(wait);
916
917         add_wait_queue(&khugepaged_wait, &wait);
918         freezable_schedule_timeout_interruptible(
919                 msecs_to_jiffies(khugepaged_alloc_sleep_millisecs));
920         remove_wait_queue(&khugepaged_wait, &wait);
921 }
922
923 static int khugepaged_node_load[MAX_NUMNODES];
924
925 static bool khugepaged_scan_abort(int nid)
926 {
927         int i;
928
929         /*
930          * If node_reclaim_mode is disabled, then no extra effort is made to
931          * allocate memory locally.
932          */
933         if (!node_reclaim_mode)
934                 return false;
935
936         /* If there is a count for this node already, it must be acceptable */
937         if (khugepaged_node_load[nid])
938                 return false;
939
940         for (i = 0; i < MAX_NUMNODES; i++) {
941                 if (!khugepaged_node_load[i])
942                         continue;
943                 if (node_distance(nid, i) > node_reclaim_distance)
944                         return true;
945         }
946         return false;
947 }
948
949 /* Defrag for khugepaged will enter direct reclaim/compaction if necessary */
950 static inline gfp_t alloc_hugepage_khugepaged_gfpmask(void)
951 {
952         return khugepaged_defrag() ? GFP_TRANSHUGE : GFP_TRANSHUGE_LIGHT;
953 }
954
955 #ifdef CONFIG_NUMA
956 static int khugepaged_find_target_node(void)
957 {
958         static int last_khugepaged_target_node = NUMA_NO_NODE;
959         int nid, target_node = 0, max_value = 0;
960
961         /* find first node with max normal pages hit */
962         for (nid = 0; nid < MAX_NUMNODES; nid++)
963                 if (khugepaged_node_load[nid] > max_value) {
964                         max_value = khugepaged_node_load[nid];
965                         target_node = nid;
966                 }
967
968         /* do some balance if several nodes have the same hit record */
969         if (target_node <= last_khugepaged_target_node)
970                 for (nid = last_khugepaged_target_node + 1; nid < MAX_NUMNODES;
971                                 nid++)
972                         if (max_value == khugepaged_node_load[nid]) {
973                                 target_node = nid;
974                                 break;
975                         }
976
977         last_khugepaged_target_node = target_node;
978         return target_node;
979 }
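
khugepaged_find_target_node() picks the node that contributed the most pages
to the scan and, when several nodes tie, rotates past the node chosen on the
previous collapse so allocations are spread across equally loaded nodes. A
stand-alone sketch of that tie-breaking with hypothetical node loads:

/* Stand-alone sketch (not kernel code): target-node tie-breaking. */
#include <stdio.h>

#define NR_NODES 4

static int find_target_node(const int load[NR_NODES], int *last_target)
{
        int nid, target = 0, max_value = 0;

        for (nid = 0; nid < NR_NODES; nid++)
                if (load[nid] > max_value) {
                        max_value = load[nid];
                        target = nid;
                }

        /* on a repeated tie, advance past the previously chosen node */
        if (target <= *last_target)
                for (nid = *last_target + 1; nid < NR_NODES; nid++)
                        if (load[nid] == max_value) {
                                target = nid;
                                break;
                        }

        *last_target = target;
        return target;
}

int main(void)
{
        int load[NR_NODES] = { 8, 8, 0, 8 };    /* hypothetical node loads */
        int last = -1;

        /* successive scans with the same tie pick nodes 0, 1, 3, then 0 again */
        for (int i = 0; i < 4; i++)
                printf("target=%d\n", find_target_node(load, &last));
        return 0;
}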
980
981 static bool khugepaged_prealloc_page(struct page **hpage, bool *wait)
982 {
983         if (IS_ERR(*hpage)) {
984                 if (!*wait)
985                         return false;
986
987                 *wait = false;
988                 *hpage = NULL;
989                 khugepaged_alloc_sleep();
990         } else if (*hpage) {
991                 put_page(*hpage);
992                 *hpage = NULL;
993         }
994
995         return true;
996 }
997
998 static struct page *
999 khugepaged_alloc_page(struct page **hpage, gfp_t gfp, int node)
1000 {
1001         VM_BUG_ON_PAGE(*hpage, *hpage);
1002
1003         *hpage = __alloc_pages_node(node, gfp, HPAGE_PMD_ORDER);
1004         if (unlikely(!*hpage)) {
1005                 count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
1006                 *hpage = ERR_PTR(-ENOMEM);
1007                 return NULL;
1008         }
1009
1010         prep_transhuge_page(*hpage);
1011         count_vm_event(THP_COLLAPSE_ALLOC);
1012         return *hpage;
1013 }
1014 #else
1015 static int khugepaged_find_target_node(void)
1016 {
1017         return 0;
1018 }
1019
1020 #ifdef CONFIG_FINEGRAINED_THP
1021 static inline struct page *alloc_khugepaged_hugepage(int hpage_order)
1022 #else
1023 static inline struct page *alloc_khugepaged_hugepage(void)
1024 #endif
1025 {
1026         struct page *page;
1027
1028 #ifdef CONFIG_FINEGRAINED_THP
1029         page = alloc_pages(alloc_hugepage_khugepaged_gfpmask(),
1030                            hpage_order);
1031 #else
1032         page = alloc_pages(alloc_hugepage_khugepaged_gfpmask(),
1033                            HPAGE_PMD_ORDER);
1034 #endif
1035         if (page)
1036                 prep_transhuge_page(page);
1037         return page;
1038 }
1039
1040 static struct page *khugepaged_alloc_hugepage(bool *wait)
1041 {
1042         struct page *hpage;
1043
1044         do {
1045 #ifdef CONFIG_FINEGRAINED_THP
1046                 hpage = alloc_khugepaged_hugepage(HPAGE_PMD_ORDER);
1047 #else
1048                 hpage = alloc_khugepaged_hugepage();
1049 #endif
1050                 if (!hpage) {
1051                         count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
1052                         if (!*wait)
1053                                 return NULL;
1054
1055                         *wait = false;
1056                         khugepaged_alloc_sleep();
1057                 } else
1058                         count_vm_event(THP_COLLAPSE_ALLOC);
1059         } while (unlikely(!hpage) && likely(khugepaged_enabled()));
1060
1061         return hpage;
1062 }
1063
1064 static bool khugepaged_prealloc_page(struct page **hpage, bool *wait)
1065 {
1066         /*
1067          * If the hpage allocated earlier was briefly exposed in page cache
1068          * before collapse_file() failed, it is possible that racing lookups
1069          * have not yet completed, and would then be unpleasantly surprised by
1070          * finding the hpage reused for the same mapping at a different offset.
1071          * Just release the previous allocation if there is any danger of that.
1072          */
1073         if (*hpage && page_count(*hpage) > 1) {
1074                 put_page(*hpage);
1075                 *hpage = NULL;
1076         }
1077
1078         if (!*hpage)
1079                 *hpage = khugepaged_alloc_hugepage(wait);
1080
1081         if (unlikely(!*hpage))
1082                 return false;
1083
1084         return true;
1085 }
1086
1087 #ifdef CONFIG_FINEGRAINED_THP
1088 static struct page *
1089 khugepaged_alloc_page(struct page **hpage, gfp_t gfp, int node, int hpage_type)
1090 {
1091         struct page *page;
1092
1093         if (hpage_type == THP_TYPE_64KB)
1094                 page = alloc_khugepaged_hugepage(HPAGE_CONT_PTE_ORDER);
1095         else {
1096                 VM_BUG_ON(!*hpage);
1097                 page = *hpage;
1098         }
1099         return page;
1100 }
1101 #else /* CONFIG_FINEGRAINED_THP */
1102 static struct page *
1103 khugepaged_alloc_page(struct page **hpage, gfp_t gfp, int node)
1104 {
1105         VM_BUG_ON(!*hpage);
1106
1107         return  *hpage;
1108 }
1109 #endif /* CONFIG_FINEGRAINED_THP */
1110 #endif
1111
1112 /*
1113  * If the mmap_lock was temporarily dropped, revalidate the vma
1114  * before taking the mmap_lock again.
1115  * Return 0 on success, otherwise return a non-zero
1116  * value (scan code).
1117  */
1118
1119 #ifdef CONFIG_FINEGRAINED_THP
1120 static int hugepage_vma_revalidate(struct mm_struct *mm, unsigned long address,
1121                 struct vm_area_struct **vmap, int hpage_type)
1122 #else
1123 static int hugepage_vma_revalidate(struct mm_struct *mm, unsigned long address,
1124                 struct vm_area_struct **vmap)
1125 #endif
1126 {
1127         struct vm_area_struct *vma;
1128         unsigned long hstart, hend;
1129
1130         if (unlikely(khugepaged_test_exit(mm)))
1131                 return SCAN_ANY_PROCESS;
1132
1133         *vmap = vma = find_vma(mm, address);
1134         if (!vma)
1135                 return SCAN_VMA_NULL;
1136
1137 #ifdef CONFIG_FINEGRAINED_THP
1138         if (hpage_type == THP_TYPE_64KB) {
1139                 hstart = (vma->vm_start + ~HPAGE_CONT_PTE_MASK) & HPAGE_CONT_PTE_MASK;
1140                 hend = vma->vm_end & HPAGE_CONT_PTE_MASK;
1141                 if (address < hstart || address + HPAGE_CONT_PTE_SIZE > hend)
1142                         return SCAN_ADDRESS_RANGE;
1143                 if (!hugepage_vma_check(vma, vma->vm_flags))
1144                         return SCAN_VMA_CHECK;
1145                 return 0;
1146         }
1147 #endif /* CONFIG_FINEGRAINED_THP */
1148         hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
1149         hend = vma->vm_end & HPAGE_PMD_MASK;
1150         if (address < hstart || address + HPAGE_PMD_SIZE > hend)
1151                 return SCAN_ADDRESS_RANGE;
1152         if (!hugepage_vma_check(vma, vma->vm_flags))
1153                 return SCAN_VMA_CHECK;
1154         /* Anon VMA expected */
1155         if (!vma->anon_vma || vma->vm_ops)
1156                 return SCAN_VMA_CHECK;
1157         return 0;
1158 }
1159
1160 /*
1161  * Bring missing pages in from swap, to complete THP collapse.
1162  * Only done if khugepaged_scan_pmd believes it is worthwhile.
1163  *
1164  * Called and returns without pte mapped or spinlocks held,
1165  * but with mmap_lock held to protect against vma changes.
1166  */
1167
1168 #ifdef CONFIG_FINEGRAINED_THP
1169 static bool __collapse_huge_page_swapin(struct mm_struct *mm,
1170                                         struct vm_area_struct *vma,
1171                                         unsigned long address, pmd_t *pmd,
1172                                         int referenced, int hpage_type)
1173 #else /* CONFIG_FINEGRAINED_THP */
1174 static bool __collapse_huge_page_swapin(struct mm_struct *mm,
1175                                         struct vm_area_struct *vma,
1176                                         unsigned long address, pmd_t *pmd,
1177                                         int referenced)
1178 #endif /* CONFIG_FINEGRAINED_THP */
1179 {
1180         int swapped_in = 0;
1181         vm_fault_t ret = 0;
1182         struct vm_fault vmf = {
1183                 .vma = vma,
1184                 .address = address,
1185                 .flags = FAULT_FLAG_ALLOW_RETRY,
1186                 .pmd = pmd,
1187                 .pgoff = linear_page_index(vma, address),
1188         };
1189 #ifdef CONFIG_FINEGRAINED_THP
1190         int hpage_size = (hpage_type == THP_TYPE_64KB) ?
1191                                                 HPAGE_CONT_PTE_SIZE : HPAGE_PMD_SIZE;
1192 #endif
1193
1194         vmf.pte = pte_offset_map(pmd, address);
1195         for (;
1196 #ifdef CONFIG_FINEGRAINED_THP
1197                         vmf.address < address + hpage_size;
1198 #else
1199                         vmf.address < address + HPAGE_PMD_NR*PAGE_SIZE;
1200 #endif
1201                         vmf.pte++, vmf.address += PAGE_SIZE) {
1202                 vmf.orig_pte = *vmf.pte;
1203                 if (!is_swap_pte(vmf.orig_pte))
1204                         continue;
1205                 swapped_in++;
1206                 ret = do_swap_page(&vmf);
1207
1208                 /* do_swap_page returns VM_FAULT_RETRY with released mmap_lock */
1209                 if (ret & VM_FAULT_RETRY) {
1210                         mmap_read_lock(mm);
1211 #ifdef CONFIG_FINEGRAINED_THP
1212                         if (hugepage_vma_revalidate(mm, address, &vmf.vma, hpage_type))
1213 #else
1214                         if (hugepage_vma_revalidate(mm, address, &vmf.vma))
1215 #endif
1216                         {
1217                                 /* vma is no longer available, don't continue to swapin */
1218                                 trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
1219                                 return false;
1220                         }
1221                         /* check if the pmd is still valid */
1222                         if (mm_find_pmd(mm, address) != pmd) {
1223                                 trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
1224                                 return false;
1225                         }
1226                 }
1227                 if (ret & VM_FAULT_ERROR) {
1228                         trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
1229                         return false;
1230                 }
1231                 /* pte is unmapped now, we need to map it */
1232                 vmf.pte = pte_offset_map(pmd, vmf.address);
1233         }
1234         vmf.pte--;
1235         pte_unmap(vmf.pte);
1236
1237         /* Drain LRU add pagevec to remove extra pin on the swapped in pages */
1238         if (swapped_in)
1239                 lru_add_drain();
1240
1241         trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 1);
1242         return true;
1243 }
1244
1245 #ifdef CONFIG_FINEGRAINED_THP
1246 static void collapse_huge_page(struct mm_struct *mm,
1247                                    unsigned long address,
1248                                    struct page **hpage,
1249                                    int node, int referenced, int unmapped,
1250                                    int hpage_type)
1251 #else /* CONFIG_FINEGRAINED_THP */
1252 static void collapse_huge_page(struct mm_struct *mm,
1253                                    unsigned long address,
1254                                    struct page **hpage,
1255                                    int node, int referenced, int unmapped)
1256 #endif /* CONFIG_FINEGRAINED_THP */
1257 {
1258         LIST_HEAD(compound_pagelist);
1259         pmd_t *pmd, _pmd;
1260         pte_t *pte;
1261         pgtable_t pgtable;
1262         struct page *new_page;
1263         spinlock_t *pmd_ptl, *pte_ptl;
1264         int isolated = 0, result = 0;
1265         struct vm_area_struct *vma;
1266         struct mmu_notifier_range range;
1267         gfp_t gfp;
1268
1269 #ifdef CONFIG_FINEGRAINED_THP
1270         pte_t _pte;
1271
1272         VM_BUG_ON(address & (hpage_type == THP_TYPE_64KB ?
1273                                 ~HPAGE_CONT_PTE_MASK : ~HPAGE_PMD_MASK));
1274 #else
1275         VM_BUG_ON(address & ~HPAGE_PMD_MASK);
1276 #endif
1277
1278         /* Only allocate from the target node */
1279         gfp = alloc_hugepage_khugepaged_gfpmask() | __GFP_THISNODE;
1280
1281         /*
1282          * Before allocating the hugepage, release the mmap_lock read lock.
1283          * The allocation can take potentially a long time if it involves
1284          * sync compaction, and we do not need to hold the mmap_lock during
1285          * that. We will recheck the vma after taking it again in write mode.
1286          */
1287         mmap_read_unlock(mm);
1288 #ifdef CONFIG_FINEGRAINED_THP
1289         new_page = khugepaged_alloc_page(hpage, gfp, node, hpage_type);
1290 #else
1291         new_page = khugepaged_alloc_page(hpage, gfp, node);
1292 #endif
1293         if (!new_page) {
1294                 result = SCAN_ALLOC_HUGE_PAGE_FAIL;
1295                 goto out_nolock;
1296         }
1297
1298         if (unlikely(mem_cgroup_charge(new_page, mm, gfp))) {
1299                 result = SCAN_CGROUP_CHARGE_FAIL;
1300                 goto out_nolock;
1301         }
1302         count_memcg_page_event(new_page, THP_COLLAPSE_ALLOC);
1303
1304         mmap_read_lock(mm);
1305 #ifdef CONFIG_FINEGRAINED_THP
1306         result = hugepage_vma_revalidate(mm, address, &vma, hpage_type);
1307 #else
1308         result = hugepage_vma_revalidate(mm, address, &vma);
1309 #endif
1310         if (result) {
1311                 mmap_read_unlock(mm);
1312                 goto out_nolock;
1313         }
1314
1315         pmd = mm_find_pmd(mm, address);
1316         if (!pmd) {
1317                 result = SCAN_PMD_NULL;
1318                 mmap_read_unlock(mm);
1319                 goto out_nolock;
1320         }
1321
1322         /*
1323          * __collapse_huge_page_swapin always returns with mmap_lock locked.
1324          * If it fails, we release mmap_lock and jump out_nolock.
1325          * Continuing to collapse causes inconsistency.
1326          */
1327 #ifdef CONFIG_FINEGRAINED_THP
1328         if (unmapped && !__collapse_huge_page_swapin(mm, vma, address,
1329                                                      pmd, referenced, hpage_type)) {
1330                 mmap_read_unlock(mm);
1331                 goto out_nolock;
1332         }
1333 #else /* CONFIG_FINEGRAINED_THP */
1334         if (unmapped && !__collapse_huge_page_swapin(mm, vma, address,
1335                                                      pmd, referenced)) {
1336                 mmap_read_unlock(mm);
1337                 goto out_nolock;
1338         }
1339 #endif /* CONFIG_FINEGRAINED_THP*/
1340
1341         mmap_read_unlock(mm);
1342         /*
1343          * Prevent all access to pagetables with the exception of
1344          * gup_fast later handled by the ptep_clear_flush and the VM
1345          * handled by the anon_vma lock + PG_lock.
1346          */
1347         mmap_write_lock(mm);
1348 #ifdef CONFIG_FINEGRAINED_THP
1349         result = hugepage_vma_revalidate(mm, address, &vma, hpage_type);
1350 #else
1351         result = hugepage_vma_revalidate(mm, address, &vma);
1352 #endif
1353         if (result)
1354                 goto out;
1355         /* check if the pmd is still valid */
1356         if (mm_find_pmd(mm, address) != pmd)
1357                 goto out;
1358
1359         anon_vma_lock_write(vma->anon_vma);
1360
1361 #ifdef CONFIG_FINEGRAINED_THP
1362         mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, NULL, mm,
1363                                 address, address + (hpage_type == THP_TYPE_64KB ?
1364                                 HPAGE_CONT_PTE_SIZE : HPAGE_PMD_SIZE));
1365 #else
1366         mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, NULL, mm,
1367                                 address, address + HPAGE_PMD_SIZE);
1368 #endif
1369         mmu_notifier_invalidate_range_start(&range);
1370
1371         pte = pte_offset_map(pmd, address);
1372         pte_ptl = pte_lockptr(mm, pmd);
1373
1374         pmd_ptl = pmd_lock(mm, pmd); /* probably unnecessary */
1375         /*
1376          * After this gup_fast can't run anymore. This also removes
1377          * any huge TLB entry from the CPU so we won't allow
1378          * huge and small TLB entries for the same virtual address
1379          * to avoid the risk of CPU bugs in that area.
1380          */
1381 #ifdef CONFIG_FINEGRAINED_THP
1382         if (hpage_type == THP_TYPE_64KB)
1383                 /* FIXME: clearing ptes here would cause
1384                  * __collapse_huge_page_isolate and __collapse_huge_page_copy
1385                  * to fail; __collapse_huge_page_copy clears the ptes itself
1386                  */
1387                 flush_tlb_range(vma, address, address + HPAGE_CONT_PTE_SIZE);
1388         else
1389 #endif /* CONFIG_FINEGRAINED_THP */
1390                 _pmd = pmdp_collapse_flush(vma, address, pmd);
1391         spin_unlock(pmd_ptl);
1392         mmu_notifier_invalidate_range_end(&range);
1393
1394         spin_lock(pte_ptl);
1395 #ifdef CONFIG_FINEGRAINED_THP
1396         isolated = __collapse_huge_page_isolate(vma, address, pte,
1397                         &compound_pagelist, hpage_type);
1398 #else /* CONFIG_FINEGRAINED_THP */
1399         isolated = __collapse_huge_page_isolate(vma, address, pte,
1400                         &compound_pagelist);
1401 #endif /* CONFIG_FINEGRAINED_THP */
1402         spin_unlock(pte_ptl);
1403
1404         if (unlikely(!isolated)) {
1405 #ifdef CONFIG_FINEGRAINED_THP
1406                 if (hpage_type == THP_TYPE_64KB) {
1407                         pte_unmap(pte);
1408                         anon_vma_unlock_write(vma->anon_vma);
1409                         result = SCAN_FAIL;
1410                         goto out;
1411                 }
1412 #endif /* CONFIG_FINEGRAINED_THP */
1413                 pte_unmap(pte);
1414                 spin_lock(pmd_ptl);
1415                 BUG_ON(!pmd_none(*pmd));
1416                 /*
1417                  * We can only use set_pmd_at when establishing
1418                  * hugepmds and never for establishing regular pmds that
1419                  * point to regular pagetables. Use pmd_populate for that.
1420                  */
1421                 pmd_populate(mm, pmd, pmd_pgtable(_pmd));
1422                 spin_unlock(pmd_ptl);
1423                 anon_vma_unlock_write(vma->anon_vma);
1424                 result = SCAN_FAIL;
1425                 goto out;
1426         }
1427
1428         /*
1429          * All pages are isolated and locked so anon_vma rmap
1430          * can't run anymore.
1431          */
1432         anon_vma_unlock_write(vma->anon_vma);
1433
1434 #ifdef CONFIG_FINEGRAINED_THP
1435         __collapse_huge_page_copy(pte, new_page, vma, address, pte_ptl,
1436                         &compound_pagelist, hpage_type);
1437 #else /* CONFIG_FINEGRAINED_THP */
1438         __collapse_huge_page_copy(pte, new_page, vma, address, pte_ptl,
1439                         &compound_pagelist);
1440 #endif /* CONFIG_FINEGRAINED_THP */
1441         pte_unmap(pte);
1442         __SetPageUptodate(new_page);
1443
1444 #ifdef CONFIG_FINEGRAINED_THP
1445         if (hpage_type == THP_TYPE_64KB) {
1446                 /* 64KB hugepage */
1447                 _pte = arch_make_huge_pte(new_page, vma);
1448                 _pte = maybe_mkwrite(pte_mkdirty(_pte), vma);
1449         } else {
1450                 /* 2MB hugepage */
1451                 pgtable = pmd_pgtable(_pmd);
1452
1453                 _pmd = mk_huge_pmd(new_page, vma->vm_page_prot);
1454                 _pmd = maybe_pmd_mkwrite(pmd_mkdirty(_pmd), vma);
1455         }
1456 #else /* CONFIG_FINEGRAINED_THP */
1457         pgtable = pmd_pgtable(_pmd);
1458
1459         _pmd = mk_huge_pmd(new_page, vma->vm_page_prot);
1460         _pmd = maybe_pmd_mkwrite(pmd_mkdirty(_pmd), vma);
1461 #endif /* CONFIG_FINEGRAINED_THP */
1462         /*
1463          * spin_lock() below is not the equivalent of smp_wmb(), so
1464          * this is needed to prevent the copy_huge_page writes from
1465          * becoming visible after the set_pmd_at() write.
1466          */
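        /*
         * (spin_lock() provides only ACQUIRE ordering; it does not order the
         *  page copies above against the later set_pmd_at() /
         *  arch_set_huge_pte_at(), hence the explicit write barrier.)
         */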
1467         smp_wmb();
1468
1469         spin_lock(pmd_ptl);
1470 #ifdef CONFIG_FINEGRAINED_THP
1471         if (hpage_type == THP_TYPE_2MB)
1472 #endif
1473                 BUG_ON(!pmd_none(*pmd));
1474         page_add_new_anon_rmap(new_page, vma, address, true);
1475         lru_cache_add_inactive_or_unevictable(new_page, vma);
1476
1477 #ifdef CONFIG_FINEGRAINED_THP
1478         if (hpage_type == THP_TYPE_64KB)
1479                 arch_set_huge_pte_at(mm, address, pte, _pte, 0);
1480         else {
1481                 pgtable_trans_huge_deposit(mm, pmd, pgtable);
1482                 set_pmd_at(mm, address, pmd, _pmd);
1483         }
1484         update_mmu_cache_pmd(vma, address, pmd);
1485 #else /* CONFIG_FINEGRAINED_THP */
1486         pgtable_trans_huge_deposit(mm, pmd, pgtable);
1487         set_pmd_at(mm, address, pmd, _pmd);
1488         update_mmu_cache_pmd(vma, address, pmd);
1489 #endif /* CONFIG_FINEGRAINED_THP */
1490         spin_unlock(pmd_ptl);
1491
1492 #ifdef CONFIG_FINEGRAINED_THP
1493         if (hpage_type == THP_TYPE_2MB)
1494 #endif
1495                 *hpage = NULL;
1496
1497         khugepaged_pages_collapsed++;
1498         result = SCAN_SUCCEED;
1499 out_up_write:
1500         mmap_write_unlock(mm);
1501 out_nolock:
1502         if (!IS_ERR_OR_NULL(*hpage))
1503                 mem_cgroup_uncharge(*hpage);
1504 #ifdef CONFIG_FINEGRAINED_THP
1505         if (result != SCAN_SUCCEED && new_page && hpage_type == THP_TYPE_64KB)
1506                 put_page(new_page);
1507 #endif
1508         trace_mm_collapse_huge_page(mm, isolated, result);
1509         return;
1510 out:
1511         goto out_up_write;
1512 }
1513
1514 #ifdef CONFIG_FINEGRAINED_THP
1515 static int khugepaged_scan_pmd(struct mm_struct *mm,
1516                                struct vm_area_struct *vma,
1517                                unsigned long address,
1518                                struct page **hpage, int hpage_type)
1519 #else /* CONFIG_FINEGRAINED_THP */
1520 static int khugepaged_scan_pmd(struct mm_struct *mm,
1521                                struct vm_area_struct *vma,
1522                                unsigned long address,
1523                                struct page **hpage)
1524 #endif /* CONFIG_FINEGRAINED_THP */
1525 {
1526         pmd_t *pmd;
1527         pte_t *pte, *_pte;
1528         int ret = 0, result = 0, referenced = 0;
1529         int none_or_zero = 0, shared = 0;
1530         struct page *page = NULL;
1531         unsigned long _address;
1532         spinlock_t *ptl;
1533         int node = NUMA_NO_NODE, unmapped = 0;
1534         bool writable = false;
1535
1536 #ifdef CONFIG_FINEGRAINED_THP
1537         int hpage_nr;
1538         int max_ptes_swap, max_ptes_none, max_ptes_shared;
1539
1540         if (hpage_type == THP_TYPE_64KB) {
1541                 VM_BUG_ON(address & ~HPAGE_CONT_PTE_MASK);
1542                 hpage_nr = HPAGE_CONT_PTE_NR;
1543                 max_ptes_swap = khugepaged_max_ptes_swap_64kb;
1544                 max_ptes_none = khugepaged_max_ptes_none_64kb;
1545                 max_ptes_shared = khugepaged_max_ptes_shared_64kb;
1546         } else {
1547                 VM_BUG_ON(address & ~HPAGE_PMD_MASK);
1548                 hpage_nr = HPAGE_PMD_NR;
1549                 max_ptes_swap = khugepaged_max_ptes_swap;
1550                 max_ptes_none = khugepaged_max_ptes_none;
1551                 max_ptes_shared = khugepaged_max_ptes_shared;
1552         }
1553 #else /* CONFIG_FINEGRAINED_THP */
1554         VM_BUG_ON(address & ~HPAGE_PMD_MASK);
1555 #endif /* CONFIG_FINEGRAINED_THP */
1556
1557         pmd = mm_find_pmd(mm, address);
1558         if (!pmd) {
1559                 result = SCAN_PMD_NULL;
1560                 goto out;
1561         }
1562
1563         memset(khugepaged_node_load, 0, sizeof(khugepaged_node_load));
1564         pte = pte_offset_map_lock(mm, pmd, address, &ptl);
1565         for (_address = address, _pte = pte;
1566 #ifdef CONFIG_FINEGRAINED_THP
1567                 _pte < pte + hpage_nr;
1568 #else
1569                 _pte < pte+HPAGE_PMD_NR;
1570 #endif
1571              _pte++, _address += PAGE_SIZE) {
1572                 pte_t pteval = *_pte;
1573                 if (is_swap_pte(pteval)) {
1574 #ifdef CONFIG_FINEGRAINED_THP
1575                         if (++unmapped <= max_ptes_swap)
1576 #else
1577                         if (++unmapped <= khugepaged_max_ptes_swap)
1578 #endif
1579                         {
1580                                 /*
1581                                  * Always be strict with uffd-wp
1582                                  * enabled swap entries.  Please see
1583                                  * comment below for pte_uffd_wp().
1584                                  */
1585                                 if (pte_swp_uffd_wp(pteval)) {
1586                                         result = SCAN_PTE_UFFD_WP;
1587                                         goto out_unmap;
1588                                 }
1589                                 continue;
1590                         } else {
1591                                 result = SCAN_EXCEED_SWAP_PTE;
1592                                 goto out_unmap;
1593                         }
1594                 }
1595                 if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) {
1596                         if (!userfaultfd_armed(vma) &&
1597 #ifdef CONFIG_FINEGRAINED_THP
1598                             ++none_or_zero <= max_ptes_none
1599 #else
1600                             ++none_or_zero <= khugepaged_max_ptes_none
1601 #endif
1602                         )
1603                         {
1604                                 continue;
1605                         } else {
1606                                 result = SCAN_EXCEED_NONE_PTE;
1607                                 goto out_unmap;
1608                         }
1609                 }
1610                 if (!pte_present(pteval)) {
1611                         result = SCAN_PTE_NON_PRESENT;
1612                         goto out_unmap;
1613                 }
1614                 if (pte_uffd_wp(pteval)) {
1615                         /*
1616                          * Don't collapse the page if any of the small
1617                          * PTEs are armed with uffd write protection.
1618                          * Here we can also mark the new huge pmd as
1619                          * write protected if any of the small ones is
1620                          * marked but that could bring unknown
1621                          * userfault messages that fall outside of
1622                          * the registered range.  So, just be simple.
1623                          */
1624                         result = SCAN_PTE_UFFD_WP;
1625                         goto out_unmap;
1626                 }
1627                 if (pte_write(pteval))
1628                         writable = true;
1629
1630                 page = vm_normal_page(vma, _address, pteval);
1631                 if (unlikely(!page)) {
1632                         result = SCAN_PAGE_NULL;
1633                         goto out_unmap;
1634                 }
1635
1636 #ifdef CONFIG_FINEGRAINED_THP
1637                 if (PageCompound(page) && PageTransHuge(compound_head(page))) {
1638                         result = SCAN_PAGE_COMPOUND;
1639                         goto out_unmap;
1640                 }
1641
1642                 if (page_mapcount(page) > 1 &&
1643                                 ++shared > max_ptes_shared)
1644 #else
1645                 if (page_mapcount(page) > 1 &&
1646                                 ++shared > khugepaged_max_ptes_shared)
1647 #endif
1648                 {
1649                         result = SCAN_EXCEED_SHARED_PTE;
1650                         goto out_unmap;
1651                 }
1652
1653                 page = compound_head(page);
1654
1655                 /*
1656                  * Record which node the original page is from and save this
1657                  * information to khugepaged_node_load[].
1658                  * Khugepaged will allocate the hugepage from the node with
1659                  * the maximum hit record.
1660                  */
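                /*
                 * khugepaged_find_target_node() (called below on success)
                 * picks the node with the highest load recorded here.
                 */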
1661                 node = page_to_nid(page);
1662                 if (khugepaged_scan_abort(node)) {
1663                         result = SCAN_SCAN_ABORT;
1664                         goto out_unmap;
1665                 }
1666                 khugepaged_node_load[node]++;
1667                 if (!PageLRU(page)) {
1668                         result = SCAN_PAGE_LRU;
1669                         goto out_unmap;
1670                 }
1671                 if (PageLocked(page)) {
1672                         result = SCAN_PAGE_LOCK;
1673                         goto out_unmap;
1674                 }
1675                 if (!PageAnon(page)) {
1676                         result = SCAN_PAGE_ANON;
1677                         goto out_unmap;
1678                 }
1679
1680                 /*
1681                  * Check if the page has any GUP (or other external) pins.
1682                  *
1683                  * Here the check is racy: it may see total_mapcount > refcount
1684                  * in some cases.
1685                  * For example, one process with one forked child process.
1686                  * The parent has the PMD split due to MADV_DONTNEED, then
1687                  * the child is trying to unmap the whole PMD, but khugepaged
1688                  * may be scanning the parent between the child clearing the
1689                  * PageDoubleMap flag and decrementing the mapcount.  So
1690                  * khugepaged may see total_mapcount > refcount.
1691                  *
1692                  * But such a case is ephemeral; we could always retry the
1693                  * collapse later.  However, it may report a false positive if
1694                  * the page has excessive GUP pins (i.e. 512).  Anyway, the same
1695                  * check will be done again later, so the risk seems low.
1696                  */
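                /*
                 * Roughly speaking, is_refcount_suitable() compares page_count()
                 * against total_mapcount() (plus a swap cache reference where
                 * applicable); any excess refcount is treated as an external pin.
                 * (Sketch only; see the helper for the exact accounting.)
                 */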
1697                 if (!is_refcount_suitable(page)) {
1698                         result = SCAN_PAGE_COUNT;
1699                         goto out_unmap;
1700                 }
1701                 if (pte_young(pteval) ||
1702                     page_is_young(page) || PageReferenced(page) ||
1703                     mmu_notifier_test_young(vma->vm_mm, address))
1704                         referenced++;
1705         }
1706         if (!writable) {
1707                 result = SCAN_PAGE_RO;
1708         } else if (!referenced || (unmapped && referenced < HPAGE_PMD_NR/2)) {
1709                 result = SCAN_LACK_REFERENCED_PAGE;
1710         } else {
1711                 result = SCAN_SUCCEED;
1712                 ret = 1;
1713         }
1714 out_unmap:
1715         pte_unmap_unlock(pte, ptl);
1716         if (ret) {
1717                 node = khugepaged_find_target_node();
1718                 /* collapse_huge_page will return with the mmap_lock released */
1719 #ifdef CONFIG_FINEGRAINED_THP
1720                 collapse_huge_page(mm, address, hpage, node,
1721                                 referenced, unmapped, hpage_type);
1722 #else
1723                 collapse_huge_page(mm, address, hpage, node,
1724                                 referenced, unmapped);
1725 #endif
1726         }
1727 out:
1728         trace_mm_khugepaged_scan_pmd(mm, page, writable, referenced,
1729                                      none_or_zero, result, unmapped);
1730         return ret;
1731 }
1732
1733 static void collect_mm_slot(struct mm_slot *mm_slot)
1734 {
1735         struct mm_struct *mm = mm_slot->mm;
1736
1737         lockdep_assert_held(&khugepaged_mm_lock);
1738
1739         if (khugepaged_test_exit(mm)) {
1740 #ifdef CONFIG_FINEGRAINED_THP
1741                 clear_hint_list(mm_slot);
1742 #endif
1743                 /* free mm_slot */
1744                 hash_del(&mm_slot->hash);
1745                 list_del(&mm_slot->mm_node);
1746
1747                 /*
1748                  * Not strictly needed because the mm exited already.
1749                  *
1750                  * clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
1751                  */
1752
1753                 /* khugepaged_mm_lock actually not necessary for the below */
1754                 free_mm_slot(mm_slot);
1755                 mmdrop(mm);
1756         }
1757 }
1758
1759 #ifdef CONFIG_SHMEM
1760 /*
1761  * Notify khugepaged that given addr of the mm is pte-mapped THP. Then
1762  * khugepaged should try to collapse the page table.
1763  */
1764 #ifdef CONFIG_FINEGRAINED_THP
1765 static int khugepaged_add_pte_mapped_thp(struct mm_struct *mm,
1766                                          unsigned long addr, int hpage_type)
1767 #else
1768 static int khugepaged_add_pte_mapped_thp(struct mm_struct *mm,
1769                                          unsigned long addr)
1770 #endif
1771 {
1772         struct mm_slot *mm_slot;
1773
1774 #ifdef CONFIG_FINEGRAINED_THP
1775         VM_BUG_ON(addr & (hpage_type == THP_TYPE_64KB ?
1776                                         ~HPAGE_CONT_PTE_MASK :~HPAGE_PMD_MASK));
1777 #else
1778         VM_BUG_ON(addr & ~HPAGE_PMD_MASK);
1779 #endif
1780
1781         spin_lock(&khugepaged_mm_lock);
1782         mm_slot = get_mm_slot(mm);
1783 #ifdef CONFIG_FINEGRAINED_THP
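        /*
         * addr is at least 64KB-aligned at this point, so bit 0 is free to
         * be used as a type tag; collapse_pte_mapped_thp() decodes it.
         */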
1784         if (hpage_type == THP_TYPE_64KB)
1785                 addr |= 0x01;
1786 #endif
1787         if (likely(mm_slot && mm_slot->nr_pte_mapped_thp < MAX_PTE_MAPPED_THP))
1788                 mm_slot->pte_mapped_thp[mm_slot->nr_pte_mapped_thp++] = addr;
1789         spin_unlock(&khugepaged_mm_lock);
1790         return 0;
1791 }
1792
1793 /**
1794  * Try to collapse a pte-mapped THP for mm at address haddr.
1795  *
1796  * This function checks whether all the PTEs in the PMD are pointing to the
1797  * right THP. If so, retract the page table so the THP can refault in
1798  * as pmd-mapped.
1799  */
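/*
 * Under CONFIG_FINEGRAINED_THP, bit 0 of @addr carries the hugepage type
 * (set for 64KB), as encoded by khugepaged_add_pte_mapped_thp() above.
 */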
1800 void collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr)
1801 {
1802         unsigned long haddr = addr & HPAGE_PMD_MASK;
1803         struct vm_area_struct *vma = find_vma(mm, haddr);
1804         struct page *hpage;
1805         pte_t *start_pte, *pte;
1806         pmd_t *pmd, _pmd;
1807         spinlock_t *ptl;
1808         int count = 0;
1809         int i;
1810 #ifdef CONFIG_FINEGRAINED_THP
1811         int hpage_type = (addr & 0x01) ? THP_TYPE_64KB : THP_TYPE_2MB;
1812         int hpage_nr = (hpage_type == THP_TYPE_64KB) ?
1813                                                         HPAGE_CONT_PTE_NR : HPAGE_PMD_NR;
1814         int hpage_size = (hpage_type == THP_TYPE_64KB) ?
1815                                                         HPAGE_CONT_PTE_SIZE : HPAGE_PMD_SIZE;
1816
1817         if (hpage_type == THP_TYPE_64KB)
1818                 haddr = addr & HPAGE_CONT_PTE_MASK;
1819 #endif
1820
1821 #ifdef CONFIG_FINEGRAINED_THP
1822         if (!vma || !vma->vm_file ||
1823             vma->vm_start > haddr || vma->vm_end < haddr + hpage_size)
1824                 return;
1825 #else /* CONFIG_FINEGRAINED_THP */
1826         if (!vma || !vma->vm_file ||
1827             vma->vm_start > haddr || vma->vm_end < haddr + HPAGE_PMD_SIZE)
1828                 return;
1829 #endif /* CONFIG_FINEGRAINED_THP */
1830
1831         /*
1832          * This vm_flags may not have VM_HUGEPAGE if the page was not
1833          * collapsed by this mm. But we can still collapse if the page is
1834          * the valid THP. Add extra VM_HUGEPAGE so hugepage_vma_check()
1835          * will not fail the vma for missing VM_HUGEPAGE
1836          */
1837         if (!hugepage_vma_check(vma, vma->vm_flags | VM_HUGEPAGE))
1838                 return;
1839
1840         hpage = find_lock_page(vma->vm_file->f_mapping,
1841                                linear_page_index(vma, haddr));
1842         if (!hpage)
1843                 return;
1844
1845         if (!PageHead(hpage))
1846                 goto drop_hpage;
1847
1848         pmd = mm_find_pmd(mm, haddr);
1849         if (!pmd)
1850                 goto drop_hpage;
1851
1852         start_pte = pte_offset_map_lock(mm, pmd, haddr, &ptl);
1853 #ifdef CONFIG_FINEGRAINED_THP
1854         if (pte_cont(*start_pte)) {
1855                 pte_unmap_unlock(start_pte, ptl);
1856                 goto drop_hpage;
1857         }
1858 #endif
1859
1860         /* step 1: check all mapped PTEs are to the right huge page */
1861         for (i = 0, addr = haddr, pte = start_pte;
1862 #ifdef CONFIG_FINEGRAINED_THP
1863              i < hpage_nr;
1864 #else
1865              i < HPAGE_PMD_NR;
1866 #endif
1867              i++, addr += PAGE_SIZE, pte++) {
1868                 struct page *page;
1869
1870                 /* empty pte, skip */
1871                 if (pte_none(*pte))
1872                         continue;
1873
1874                 /* page swapped out, abort */
1875                 if (!pte_present(*pte))
1876                         goto abort;
1877
1878                 page = vm_normal_page(vma, addr, *pte);
1879
1880                 /*
1881                  * Note that uprobe, debugger, or MAP_PRIVATE may change the
1882                  * page table, but the new page will not be a subpage of hpage.
1883                  */
1884                 if (hpage + i != page)
1885                         goto abort;
1886                 count++;
1887         }
1888
1889         /* step 2: adjust rmap */
1890         for (i = 0, addr = haddr, pte = start_pte;
1891 #ifdef CONFIG_FINEGRAINED_THP
1892                 i < hpage_nr;
1893 #else
1894             i < HPAGE_PMD_NR;
1895 #endif
1896              i++, addr += PAGE_SIZE, pte++) {
1897                 struct page *page;
1898
1899                 if (pte_none(*pte))
1900                         continue;
1901                 page = vm_normal_page(vma, addr, *pte);
1902                 page_remove_rmap(page, false);
1903         }
1904
1905         pte_unmap_unlock(start_pte, ptl);
1906
1907         /* step 3: set proper refcount and mm_counters. */
1908         if (count) {
1909                 page_ref_sub(hpage, count);
1910                 add_mm_counter(vma->vm_mm, mm_counter_file(hpage), -count);
1911         }
1912
1913         /* step 4: collapse pmd */
1914         ptl = pmd_lock(vma->vm_mm, pmd);
1915 #ifdef CONFIG_FINEGRAINED_THP
1916         if (hpage_type == THP_TYPE_64KB) {
1917                 pte_t *ptep = pte_offset_map(pmd, haddr);
1918                 arch_clear_huge_pte_range(vma->vm_mm, haddr, ptep);
1919                 spin_unlock(ptl);
1920         } else {
1921                 _pmd = pmdp_collapse_flush(vma, haddr, pmd);
1922                 spin_unlock(ptl);
1923                 mm_dec_nr_ptes(mm);
1924                 pte_free(mm, pmd_pgtable(_pmd));
1925         }
1926 #else /* CONFIG_FINEGRAINED_THP*/
1927         _pmd = pmdp_collapse_flush(vma, haddr, pmd);
1928         spin_unlock(ptl);
1929         mm_dec_nr_ptes(mm);
1930         pte_free(mm, pmd_pgtable(_pmd));
1931 #endif /* CONFIG_FINEGRAINED_THP */
1932
1933 drop_hpage:
1934         unlock_page(hpage);
1935         put_page(hpage);
1936         return;
1937
1938 abort:
1939         pte_unmap_unlock(start_pte, ptl);
1940         goto drop_hpage;
1941 }
1942
1943 static int khugepaged_collapse_pte_mapped_thps(struct mm_slot *mm_slot)
1944 {
1945         struct mm_struct *mm = mm_slot->mm;
1946         int i;
1947
1948         if (likely(mm_slot->nr_pte_mapped_thp == 0))
1949                 return 0;
1950
1951         if (!mmap_write_trylock(mm))
1952                 return -EBUSY;
1953
1954         if (unlikely(khugepaged_test_exit(mm)))
1955                 goto out;
1956
1957         for (i = 0; i < mm_slot->nr_pte_mapped_thp; i++)
1958                 collapse_pte_mapped_thp(mm, mm_slot->pte_mapped_thp[i]);
1959
1960 out:
1961         mm_slot->nr_pte_mapped_thp = 0;
1962         mmap_write_unlock(mm);
1963         return 0;
1964 }
1965
1966 #ifdef CONFIG_FINEGRAINED_THP
1967 static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff,
1968                                                         int hpage_type)
1969 #else
1970 static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff)
1971 #endif
1972 {
1973         struct vm_area_struct *vma;
1974         struct mm_struct *mm;
1975         unsigned long addr;
1976         pmd_t *pmd, _pmd;
1977 #ifdef CONFIG_FINEGRAINED_THP
1978         pte_t *ptep;
1979         int hpage_size = (hpage_type == THP_TYPE_64KB) ?
1980                                 HPAGE_CONT_PTE_SIZE : HPAGE_PMD_SIZE;
1981 #endif /* CONFIG_FINEGRAINED_THP */
1982
1983         i_mmap_lock_write(mapping);
1984         vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
1985                 /*
1986                  * Check vma->anon_vma to exclude MAP_PRIVATE mappings that
1987                  * got written to. These VMAs are likely not worth investing
1988                  * mmap_write_lock(mm) as PMD-mapping is likely to be split
1989                  * later.
1990                  *
1991                  * Note that the vma->anon_vma check is racy: it can be set up
1992                  * by the fault path after the check but before we take mmap_lock.
1993                  * But the page lock would prevent establishing any new ptes of
1994                  * the page, so we are safe.
1995                  *
1996                  * An alternative would be to drop the check, but to check that
1997                  * the page table is clear before calling pmdp_collapse_flush()
1998                  * under ptl. It has a higher chance to recover the THP for the
1999                  * VMA, but has a higher cost too.
2000                  */
2001                 if (vma->anon_vma)
2002                         continue;
2003                 addr = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
2004 #ifdef CONFIG_FINEGRAINED_THP
2005                 if (hpage_type == THP_TYPE_64KB && addr & ~HPAGE_CONT_PTE_MASK)
2006                         continue;
2007                 else if (hpage_type == THP_TYPE_2MB && addr & ~HPAGE_PMD_MASK)
2008                         continue;
2009                 if (vma->vm_end < addr + hpage_size)
2010                         continue;
2011
2012                 mm = vma->vm_mm;
2013                 pmd = mm_find_pmd(mm, addr);
2014                 if (!pmd)
2015                         continue;
2016                 if (mmap_write_trylock(mm)) {
2017                         spinlock_t *ptl = pmd_lock(mm, pmd);
2018                         if (hpage_type == THP_TYPE_64KB) {
2019                                 /* 64KB hugepage */
2020                                 ptep = pte_offset_map(pmd, addr);
2021                                 /* pte maps are established on page fault handling */
2022                                 arch_clear_huge_pte_range(mm, addr, ptep);
2023                                 spin_unlock(ptl);
2024                         } else {
2025                                 /* 2MB hugepage */
2026                                 /*
2027                                  * We need exclusive mmap_lock to retract the page table.
2028                                  *
2029                                  * We use trylock due to lock inversion: we need to acquire
2030                                  * mmap_lock while holding the page lock. The fault path does
2031                                  * it in reverse order. Trylock is a way to avoid deadlock.
2032                                  */
2033                                 _pmd = pmdp_collapse_flush(vma, addr, pmd);
2034                                 spin_unlock(ptl);
2035
2036                                 mm_dec_nr_ptes(mm);
2037                                 pte_free(mm, pmd_pgtable(_pmd));
2038                         }
2039                         mmap_write_unlock(mm);
2040                 } else
2041                         khugepaged_add_pte_mapped_thp(vma->vm_mm, addr, hpage_type);
2042 #else /* CONFIG_FINEGRAINED_THP */
2043                 if (addr & ~HPAGE_PMD_MASK)
2044                         continue;
2045                 if (vma->vm_end < addr + HPAGE_PMD_SIZE)
2046                         continue;
2047                 mm = vma->vm_mm;
2048                 pmd = mm_find_pmd(mm, addr);
2049                 if (!pmd)
2050                         continue;
2051                 /*
2052                  * We need exclusive mmap_lock to retract page table.
2053                  *
2054                  * We use trylock due to lock inversion: we need to acquire
2055                  * mmap_lock while holding page lock. Fault path does it in
2056                  * reverse order. Trylock is a way to avoid deadlock.
2057                  */
2058                 if (mmap_write_trylock(mm)) {
2059                         if (!khugepaged_test_exit(mm)) {
2060                                 spinlock_t *ptl = pmd_lock(mm, pmd);
2061                                 /* assume page table is clear */
2062                                 _pmd = pmdp_collapse_flush(vma, addr, pmd);
2063                                 spin_unlock(ptl);
2064                                 mm_dec_nr_ptes(mm);
2065                                 pte_free(mm, pmd_pgtable(_pmd));
2066                         }
2067                         mmap_write_unlock(mm);
2068                 } else {
2069                         /* Try again later */
2070                         khugepaged_add_pte_mapped_thp(mm, addr);
2071                 }
2072 #endif /* CONFIG_FINEGRAINED_THP */
2073         }
2074         i_mmap_unlock_write(mapping);
2075 }
2076
2077 /**
2078  * collapse_file - collapse filemap/tmpfs/shmem pages into a huge one.
2079  *
2080  * Basic scheme is simple, details are more complex:
2081  *  - allocate and lock a new huge page;
2082  *  - scan page cache replacing old pages with the new one
2083  *    + swap/gup in pages if necessary;
2084  *    + fill in gaps;
2085  *    + keep old pages around in case rollback is required;
2086  *  - if replacing succeeds:
2087  *    + copy data over;
2088  *    + free old pages;
2089  *    + unlock huge page;
2090  *  - if replacing failed:
2091  *    + put all pages back and unfreeze them;
2092  *    + restore gaps in the page cache;
2093  *    + unlock and free huge page;
2094  */
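/*
 * With CONFIG_FINEGRAINED_THP the same scheme also covers a 64KB extent:
 * hpage_nr and hpage_order below select HPAGE_CONT_PTE_NR pages and the
 * matching XArray order instead of the PMD-sized values.
 */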
2095 #ifdef CONFIG_FINEGRAINED_THP
2096 static void collapse_file(struct mm_struct *mm,
2097                 struct file *file, pgoff_t start,
2098                 struct page **hpage, int node, int hpage_type)
2099 #else /* CONFIG_FINEGRAINED_THP */
2100 static void collapse_file(struct mm_struct *mm,
2101                 struct file *file, pgoff_t start,
2102                 struct page **hpage, int node)
2103 #endif /* CONFIG_FINEGRAINED_THP */
2104 {
2105         struct address_space *mapping = file->f_mapping;
2106         gfp_t gfp;
2107         struct page *new_page;
2108 #ifdef CONFIG_FINEGRAINED_THP
2109         int hpage_nr = (hpage_type == THP_TYPE_64KB ?
2110                                         HPAGE_CONT_PTE_NR : HPAGE_PMD_NR);
2111         int hpage_order = (hpage_type == THP_TYPE_64KB ?
2112                                         HPAGE_CONT_PTE_ORDER : HPAGE_PMD_ORDER);
2113         pgoff_t index, end = start + hpage_nr;
2114 #else /* CONFIG_FINEGRAINED_THP */
2115         pgoff_t index, end = start + HPAGE_PMD_NR;
2116 #endif /* CONFIG_FINEGRAINED_THP */
2117         LIST_HEAD(pagelist);
2118 #ifdef CONFIG_FINEGRAINED_THP
2119         XA_STATE_ORDER(xas, &mapping->i_pages, start, hpage_order);
2120 #else
2121         XA_STATE_ORDER(xas, &mapping->i_pages, start, HPAGE_PMD_ORDER);
2122 #endif
2123         int nr_none = 0, result = SCAN_SUCCEED;
2124         bool is_shmem = shmem_file(file);
2125
2126         VM_BUG_ON(!IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS) && !is_shmem);
2127 #ifdef CONFIG_FINEGRAINED_THP
2128         VM_BUG_ON(start & (hpage_nr - 1));
2129 #else
2130         VM_BUG_ON(start & (HPAGE_PMD_NR - 1));
2131 #endif
2132
2133         /* Only allocate from the target node */
2134         gfp = alloc_hugepage_khugepaged_gfpmask() | __GFP_THISNODE;
2135
2136 #ifdef CONFIG_FINEGRAINED_THP
2137         new_page = khugepaged_alloc_page(hpage, gfp, node, hpage_type);
2138 #else
2139         new_page = khugepaged_alloc_page(hpage, gfp, node);
2140 #endif
2141         if (!new_page) {
2142                 result = SCAN_ALLOC_HUGE_PAGE_FAIL;
2143                 goto out;
2144         }
2145
2146         if (unlikely(mem_cgroup_charge(new_page, mm, gfp))) {
2147                 result = SCAN_CGROUP_CHARGE_FAIL;
2148                 goto out;
2149         }
2150         count_memcg_page_event(new_page, THP_COLLAPSE_ALLOC);
2151
2152         /* This will be less messy when we use multi-index entries */
2153         do {
2154                 xas_lock_irq(&xas);
2155                 xas_create_range(&xas);
2156                 if (!xas_error(&xas))
2157                         break;
2158                 xas_unlock_irq(&xas);
2159                 if (!xas_nomem(&xas, GFP_KERNEL)) {
2160                         result = SCAN_FAIL;
2161                         goto out;
2162                 }
2163         } while (1);
2164
2165         __SetPageLocked(new_page);
2166         if (is_shmem)
2167                 __SetPageSwapBacked(new_page);
2168         new_page->index = start;
2169         new_page->mapping = mapping;
2170
2171         /*
2172          * At this point the new_page is locked and not up-to-date.
2173          * It's safe to insert it into the page cache, because nobody would
2174          * be able to map it or use it in another way until we unlock it.
2175          */
2176
2177         xas_set(&xas, start);
2178         for (index = start; index < end; index++) {
2179                 struct page *page = xas_next(&xas);
2180
2181                 VM_BUG_ON(index != xas.xa_index);
2182                 if (is_shmem) {
2183                         if (!page) {
2184                                 /*
2185                                  * Stop if extent has been truncated or
2186                                  * hole-punched, and is now completely
2187                                  * empty.
2188                                  */
2189                                 if (index == start) {
2190                                         if (!xas_next_entry(&xas, end - 1)) {
2191                                                 result = SCAN_TRUNCATED;
2192                                                 goto xa_locked;
2193                                         }
2194                                         xas_set(&xas, index);
2195                                 }
2196                                 if (!shmem_charge(mapping->host, 1)) {
2197                                         result = SCAN_FAIL;
2198                                         goto xa_locked;
2199                                 }
2200                                 xas_store(&xas, new_page);
2201                                 nr_none++;
2202                                 continue;
2203                         }
2204
2205                         if (xa_is_value(page) || !PageUptodate(page)) {
2206                                 xas_unlock_irq(&xas);
2207                                 /* swap in or instantiate fallocated page */
2208                                 if (shmem_getpage(mapping->host, index, &page,
2209                                                   SGP_NOHUGE)) {
2210                                         result = SCAN_FAIL;
2211                                         goto xa_unlocked;
2212                                 }
2213                         } else if (trylock_page(page)) {
2214                                 get_page(page);
2215                                 xas_unlock_irq(&xas);
2216                         } else {
2217                                 result = SCAN_PAGE_LOCK;
2218                                 goto xa_locked;
2219                         }
2220                 } else {        /* !is_shmem */
2221                         if (!page || xa_is_value(page)) {
2222                                 xas_unlock_irq(&xas);
2223                                 page_cache_sync_readahead(mapping, &file->f_ra,
2224                                                           file, index,
2225                                                           end - index);
2226                                 /* drain pagevecs to help isolate_lru_page() */
2227                                 lru_add_drain();
2228                                 page = find_lock_page(mapping, index);
2229                                 if (unlikely(page == NULL)) {
2230                                         result = SCAN_FAIL;
2231                                         goto xa_unlocked;
2232                                 }
2233                         } else if (PageDirty(page)) {
2234                                 /*
2235                                  * khugepaged only works on read-only fd,
2236                                  * so this page is dirty because it hasn't
2237                                  * been flushed since first write. There
2238                                  * won't be new dirty pages.
2239                                  *
2240                                  * Trigger async flush here and hope the
2241                                  * writeback is done when khugepaged
2242                                  * revisits this page.
2243                                  *
2244                                  * This is a one-off situation. We are not
2245                                  * forcing writeback in loop.
2246                                  */
2247                                 xas_unlock_irq(&xas);
2248                                 filemap_flush(mapping);
2249                                 result = SCAN_FAIL;
2250                                 goto xa_unlocked;
2251                         } else if (trylock_page(page)) {
2252                                 get_page(page);
2253                                 xas_unlock_irq(&xas);
2254                         } else {
2255                                 result = SCAN_PAGE_LOCK;
2256                                 goto xa_locked;
2257                         }
2258                 }
2259
2260                 /*
2261                  * The page must be locked, so we can drop the i_pages lock
2262                  * without racing with truncate.
2263                  */
2264                 VM_BUG_ON_PAGE(!PageLocked(page), page);
2265
2266                 /* make sure the page is up to date */
2267                 if (unlikely(!PageUptodate(page))) {
2268                         result = SCAN_FAIL;
2269                         goto out_unlock;
2270                 }
2271
2272                 /*
2273                  * If file was truncated then extended, or hole-punched, before
2274                  * we locked the first page, then a THP might be there already.
2275                  */
2276                 if (PageTransCompound(page)) {
2277                         result = SCAN_PAGE_COMPOUND;
2278                         goto out_unlock;
2279                 }
2280
2281                 if (page_mapping(page) != mapping) {
2282                         result = SCAN_TRUNCATED;
2283                         goto out_unlock;
2284                 }
2285
2286                 if (!is_shmem && PageDirty(page)) {
2287                         /*
2288                          * khugepaged only works on read-only fd, so this
2289                          * page is dirty because it hasn't been flushed
2290                          * since first write.
2291                          */
2292                         result = SCAN_FAIL;
2293                         goto out_unlock;
2294                 }
2295
2296                 if (isolate_lru_page(page)) {
2297                         result = SCAN_DEL_PAGE_LRU;
2298                         goto out_unlock;
2299                 }
2300
2301                 if (page_has_private(page) &&
2302                     !try_to_release_page(page, GFP_KERNEL)) {
2303                         result = SCAN_PAGE_HAS_PRIVATE;
2304                         putback_lru_page(page);
2305                         goto out_unlock;
2306                 }
2307
2308                 if (page_mapped(page))
2309                         unmap_mapping_pages(mapping, index, 1, false);
2310
2311                 xas_lock_irq(&xas);
2312                 xas_set(&xas, index);
2313
2314                 VM_BUG_ON_PAGE(page != xas_load(&xas), page);
2315                 VM_BUG_ON_PAGE(page_mapped(page), page);
2316
2317                 /*
2318                  * The page is expected to have page_count() == 3:
2319                  *  - we hold a pin on it;
2320                  *  - one reference from page cache;
2321                  *  - one from isolate_lru_page;
2322                  */
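                /*
                 * Any extra pin (e.g. a concurrent GUP) makes the
                 * page_ref_freeze(page, 3) below fail and aborts the collapse
                 * for this extent.
                 */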
2323                 if (!page_ref_freeze(page, 3)) {
2324                         result = SCAN_PAGE_COUNT;
2325                         xas_unlock_irq(&xas);
2326                         putback_lru_page(page);
2327                         goto out_unlock;
2328                 }
2329
2330                 /*
2331                  * Add the page to the list to be able to undo the collapse if
2332                  * something goes wrong.
2333                  */
2334                 list_add_tail(&page->lru, &pagelist);
2335
2336                 /* Finally, replace with the new page. */
2337                 xas_store(&xas, new_page);
2338                 continue;
2339 out_unlock:
2340                 unlock_page(page);
2341                 put_page(page);
2342                 goto xa_unlocked;
2343         }
2344
2345         if (is_shmem)
2346 #ifdef CONFIG_FINEGRAINED_THP
2347                 if (hpage_type == THP_TYPE_64KB)
2348                         __inc_node_page_state(new_page, NR_SHMEM_64KB_THPS);
2349                 else
2350                         __inc_node_page_state(new_page, NR_SHMEM_THPS);
2351 #else /* CONFIG_FINEGRAINED_THP */
2352                 __inc_node_page_state(new_page, NR_SHMEM_THPS);
2353 #endif /* CONFIG_FINEGRAINED_THP */
2354         else {
2355 #ifdef CONFIG_FINEGRAINED_THP
2356                 if (hpage_type == THP_TYPE_64KB)
2357                         __inc_node_page_state(new_page, NR_FILE_64KB_THPS);
2358                 else
2359                         __inc_node_page_state(new_page, NR_FILE_THPS);
2360 #else /* CONFIG_FINEGRAINED_THP */
2361                 __inc_node_page_state(new_page, NR_FILE_THPS);
2362 #endif /* CONFIG_FINEGRAINED_THP */
2363                 filemap_nr_thps_inc(mapping);
2364         }
2365
2366         if (nr_none) {
2367                 __mod_lruvec_page_state(new_page, NR_FILE_PAGES, nr_none);
2368                 if (is_shmem)
2369                         __mod_lruvec_page_state(new_page, NR_SHMEM, nr_none);
2370         }
2371
2372 xa_locked:
2373         xas_unlock_irq(&xas);
2374 xa_unlocked:
2375
2376         if (result == SCAN_SUCCEED) {
2377                 struct page *page, *tmp;
2378 #ifdef CONFIG_FINEGRAINED_THP
2379                 int offset = 0;
2380 #endif
2381
2382                 /*
2383                  * Replacing old pages with new one has succeeded, now we
2384                  * need to copy the content and free the old pages.
2385                  */
2386                 index = start;
2387                 list_for_each_entry_safe(page, tmp, &pagelist, lru) {
2388 #ifdef CONFIG_FINEGRAINED_THP
2389                         if (hpage_type != THP_TYPE_64KB) {
2390                                 while (index < page->index) {
2391                                         clear_highpage(new_page + (index % HPAGE_PMD_NR));
2392                                         index++;
2393                                 }
2394                         }
2395
2396                         if (hpage_type == THP_TYPE_64KB) {
2397                                 copy_highpage(new_page + offset, page);
2398                                 offset++;
2399                         } else
2400                                 copy_highpage(new_page + (page->index % HPAGE_PMD_NR),
2401                                                 page);
2402 #else /* CONFIG_FINEGRAINED_THP */
2403                         while (index < page->index) {
2404                                 clear_highpage(new_page + (index % HPAGE_PMD_NR));
2405                                 index++;
2406                         }
2407                         copy_highpage(new_page + (page->index % HPAGE_PMD_NR),
2408                                         page);
2409 #endif /* CONFIG_FINEGRAINED_THP */
2410                         list_del(&page->lru);
2411                         page->mapping = NULL;
2412                         page_ref_unfreeze(page, 1);
2413                         ClearPageActive(page);
2414                         ClearPageUnevictable(page);
2415                         unlock_page(page);
2416                         put_page(page);
2417                         index++;
2418                 }
2419 #ifdef CONFIG_FINEGRAINED_THP
2420                 if (hpage_type == THP_TYPE_64KB) {
2421                         while (index < end) {
2422                                 clear_highpage(new_page + offset);
2423                                 offset++;
2424                                 index++;
2425                         }
2426                 } else {
2427                         while (index < end) {
2428                                 clear_highpage(new_page + (index % HPAGE_PMD_NR));
2429                                 index++;
2430                         }
2431                 }
2432 #else /* CONFIG_FINEGRAINED_THP */
2433                 while (index < end) {
2434                         clear_highpage(new_page + (index % HPAGE_PMD_NR));
2435                         index++;
2436                 }
2437 #endif /* CONFIG_FINEGRAINED_THP */
2438
2439                 SetPageUptodate(new_page);
2440 #ifdef CONFIG_FINEGRAINED_THP
2441                 page_ref_add(new_page, hpage_nr - 1);
2442 #else
2443                 page_ref_add(new_page, HPAGE_PMD_NR - 1);
2444 #endif
2445                 if (is_shmem)
2446                         set_page_dirty(new_page);
2447                 lru_cache_add(new_page);
2448
2449                 /*
2450                  * Remove pte page tables, so we can re-fault the page as huge.
2451                  */
2452 #ifdef CONFIG_FINEGRAINED_THP
2453                 retract_page_tables(mapping, start, hpage_type);
2454                 if (hpage_type == THP_TYPE_2MB)
2455                         *hpage = NULL;
2456 #else /* CONFIG_FINEGRAINED_THP */
2457                 retract_page_tables(mapping, start);
2458                 *hpage = NULL;
2459 #endif /* CONFIG_FINEGRAINED_THP */
2460                 khugepaged_pages_collapsed++;
2461         } else {
2462                 struct page *page;
2463
2464                 /* Something went wrong: roll back page cache changes */
2465                 xas_lock_irq(&xas);
2466                 mapping->nrpages -= nr_none;
2467
2468                 if (is_shmem)
2469                         shmem_uncharge(mapping->host, nr_none);
2470
2471                 xas_set(&xas, start);
2472                 xas_for_each(&xas, page, end - 1) {
2473                         page = list_first_entry_or_null(&pagelist,
2474                                         struct page, lru);
2475                         if (!page || xas.xa_index < page->index) {
2476                                 if (!nr_none)
2477                                         break;
2478                                 nr_none--;
2479                                 /* Put holes back where they were */
2480                                 xas_store(&xas, NULL);
2481                                 continue;
2482                         }
2483
2484                         VM_BUG_ON_PAGE(page->index != xas.xa_index, page);
2485
2486                         /* Unfreeze the page. */
2487                         list_del(&page->lru);
2488                         page_ref_unfreeze(page, 2);
2489                         xas_store(&xas, page);
2490                         xas_pause(&xas);
2491                         xas_unlock_irq(&xas);
2492                         unlock_page(page);
2493                         putback_lru_page(page);
2494                         xas_lock_irq(&xas);
2495                 }
2496                 VM_BUG_ON(nr_none);
2497                 xas_unlock_irq(&xas);
2498
2499                 new_page->mapping = NULL;
2500         }
2501
2502         unlock_page(new_page);
2503 out:
2504 #ifdef CONFIG_FINEGRAINED_THP
2505         if (result != SCAN_SUCCEED && new_page && hpage_type == THP_TYPE_64KB)
2506                 put_page(new_page);
2507 #endif
2508         VM_BUG_ON(!list_empty(&pagelist));
2509         if (!IS_ERR_OR_NULL(*hpage))
2510                 mem_cgroup_uncharge(*hpage);
2511         /* TODO: tracepoints */
2512 }
2513
2514 #ifdef CONFIG_FINEGRAINED_THP
2515 static void khugepaged_scan_file(struct mm_struct *mm,
2516                 struct file *file, pgoff_t start, struct page **hpage,
2517                 int hpage_type)
2518 #else /* CONFIG_FINEGRAINED_THP */
2519 static void khugepaged_scan_file(struct mm_struct *mm,
2520                 struct file *file, pgoff_t start, struct page **hpage)
2521 #endif /* CONFIG_FINEGRAINED_THP */
2522 {
2523         struct page *page = NULL;
2524         struct address_space *mapping = file->f_mapping;
2525         XA_STATE(xas, &mapping->i_pages, start);
2526         int present, swap;
2527         int node = NUMA_NO_NODE;
2528         int result = SCAN_SUCCEED;
2529 #ifdef CONFIG_FINEGRAINED_THP
2530         int hpage_nr;
2531         int max_ptes_swap, max_ptes_none, max_ptes_shared;
2532
2533         if (hpage_type == THP_TYPE_64KB) {
2534                 hpage_nr = HPAGE_CONT_PTE_NR; /* 64KB */
2535                 max_ptes_swap = khugepaged_max_ptes_swap_64kb;
2536                 max_ptes_none = khugepaged_max_ptes_none_64kb;
2537                 max_ptes_shared = khugepaged_max_ptes_shared_64kb;
2538         } else {
2539                 hpage_nr = HPAGE_PMD_NR; /* 2MB */
2540                 max_ptes_swap = khugepaged_max_ptes_swap;
2541                 max_ptes_none = khugepaged_max_ptes_none;
2542                 max_ptes_shared = khugepaged_max_ptes_shared;
2543         }
2544 #endif /* CONFIG_FINEGRAINED_THP */
2545
2546         present = 0;
2547         swap = 0;
2548         memset(khugepaged_node_load, 0, sizeof(khugepaged_node_load));
2549         rcu_read_lock();
2550 #ifdef CONFIG_FINEGRAINED_THP
2551         xas_for_each(&xas, page, start + hpage_nr - 1)
2552 #else
2553         xas_for_each(&xas, page, start + HPAGE_PMD_NR - 1)
2554 #endif
2555         {
2556                 if (xas_retry(&xas, page))
2557                         continue;
2558
2559                 if (xa_is_value(page)) {
2560 #ifdef CONFIG_FINEGRAINED_THP
2561                         if (++swap > max_ptes_swap)
2562 #else
2563                         if (++swap > khugepaged_max_ptes_swap)
2564 #endif
2565                         {
2566                                 result = SCAN_EXCEED_SWAP_PTE;
2567                                 break;
2568                         }
2569                         continue;
2570                 }
2571
2572                 if (PageTransCompound(page)) {
2573                         result = SCAN_PAGE_COMPOUND;
2574                         break;
2575                 }
2576
2577                 node = page_to_nid(page);
2578                 if (khugepaged_scan_abort(node)) {
2579                         result = SCAN_SCAN_ABORT;
2580                         break;
2581                 }
2582                 khugepaged_node_load[node]++;
2583
2584                 if (!PageLRU(page)) {
2585                         result = SCAN_PAGE_LRU;
2586                         break;
2587                 }
2588
2589                 if (page_count(page) !=
2590                     1 + page_mapcount(page) + page_has_private(page)) {
2591                         result = SCAN_PAGE_COUNT;
2592                         break;
2593                 }
2594
2595                 /*
2596                  * We probably should check if the page is referenced here, but
2597                  * nobody would transfer pte_young() to PageReferenced() for us.
2598                  * And rmap walk here is just too costly...
2599                  */
2600
2601                 present++;
2602
2603                 if (need_resched()) {
2604                         xas_pause(&xas);
2605                         cond_resched_rcu();
2606                 }
2607         }
2608         rcu_read_unlock();
2609
2610         if (result == SCAN_SUCCEED) {
2611 #ifdef CONFIG_FINEGRAINED_THP
2612                 if (present < hpage_nr - max_ptes_none)
2613 #else
2614                 if (present < HPAGE_PMD_NR - khugepaged_max_ptes_none)
2615 #endif
2616                 {
2617                         result = SCAN_EXCEED_NONE_PTE;
2618                 } else {
2619                         node = khugepaged_find_target_node();
2620 #ifdef CONFIG_FINEGRAINED_THP
2621                         collapse_file(mm, file, start, hpage, node, hpage_type);
2622 #else
2623                         collapse_file(mm, file, start, hpage, node);
2624 #endif
2625                 }
2626         }
2627
2628         /* TODO: tracepoints */
2629 }
2630 #else
2631 #ifdef CONFIG_FINEGRAINED_THP
2632 static void khugepaged_scan_file(struct mm_struct *mm,
2633                 struct file *file, pgoff_t start, struct page **hpage,
2634                 int hpage_type)
2635 #else /* CONFIG_FINEGRAINED_THP */
2636 static void khugepaged_scan_file(struct mm_struct *mm,
2637                 struct file *file, pgoff_t start, struct page **hpage)
2638 #endif /* CONFIG_FINEGRAINED_THP */
2639 {
2640         BUILD_BUG();
2641 }
2642
2643 static int khugepaged_collapse_pte_mapped_thps(struct mm_slot *mm_slot)
2644 {
2645         return 0;
2646 }
2647 #endif
2648
2649 #ifdef CONFIG_FINEGRAINED_THP
2650 /*
2651  * If the return value is > 0, the vma can make a hugepage and the
2652  * calculated hugepage start and end are stored in the given pointers;
2653  * otherwise, the vma cannot make a hugepage.
2654  */
2655 static inline int hugepage_determine_htype(unsigned long vm_start,
2656                 unsigned long vm_end, unsigned long *hstart, unsigned long *hend) {
2657         unsigned long start, end;
2658
2659         /* determine 2MB hugepage */
2660         start = (vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
2661         end = vm_end & HPAGE_PMD_MASK;
2662         if (start >= end) {
2663                 /* determine 64KB hugepage */
2664                 start = (vm_start + ~HPAGE_CONT_PTE_MASK) & HPAGE_CONT_PTE_MASK;
2665                 end = vm_end & HPAGE_CONT_PTE_MASK;
2666                 if (start >= end)
2667                         return THP_TYPE_FAIL;
2668                 *hstart = start;
2669                 *hend = end;
2670                 return THP_TYPE_64KB;
2671         }
2672         *hstart = start;
2673         *hend = end;
2674         return THP_TYPE_2MB;
2675 }
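/*
 * Illustrative example (assuming 4KB base pages with a 2MB PMD size and a
 * 64KB CONT_PTE size, as on arm64): a VMA spanning [0x200000, 0x600000)
 * yields THP_TYPE_2MB with hstart = 0x200000 and hend = 0x600000, whereas a
 * VMA spanning [0x210000, 0x230000) cannot fit a 2MB page and falls back to
 * THP_TYPE_64KB with hstart = 0x210000 and hend = 0x230000.
 */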
2676
2677 enum {
2678         KHUGEPAGE_SCAN_CONTINUE,
2679         KHUGEPAGE_SCAN_BREAK,
2680         KHUGEPAGE_SCAN_BREAK_MMAP_LOCK,
2681 };
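/*
 * Scan status for khugepaged_scan_vma(): CONTINUE moves on to the next
 * region, BREAK ends this scan round, and BREAK_MMAP_LOCK ends it because
 * mmap_lock has already been released by the collapse path.
 */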
2682
2683 static unsigned int khugepaged_scan_vma(struct mm_struct *mm,
2684                         struct vm_area_struct *vma, struct page **hpage,
2685                         unsigned int pages, int *progress)
2686 {
2687         unsigned long hstart, hend;
2688         int hpage_type, ret;
2689         int hpage_size, hpage_nr;
2690
2691         if (!hugepage_vma_check(vma, vma->vm_flags))
2692                 return KHUGEPAGE_SCAN_CONTINUE;
2693
2694         hpage_type = hugepage_determine_htype(
2695                                 (vma->vm_start > khugepaged_scan.address) ?
2696                                 vma->vm_start : khugepaged_scan.address,
2697                                 vma->vm_end, &hstart, &hend);
2698
2699         if (hpage_type == THP_TYPE_FAIL)
2700                 return KHUGEPAGE_SCAN_CONTINUE;
2701         if (khugepaged_scan.address > hend)
2702                 return KHUGEPAGE_SCAN_CONTINUE;
2703         if (khugepaged_scan.address < hstart)
2704                 khugepaged_scan.address = hstart;
2705
2706         if (hpage_type == THP_TYPE_64KB) {
2707                 VM_BUG_ON(khugepaged_scan.address & ~HPAGE_CONT_PTE_MASK);
2708                 hpage_size = HPAGE_CONT_PTE_SIZE; /* 64KB */
2709                 hpage_nr = HPAGE_CONT_PTE_NR;
2710         } else if (hpage_type == THP_TYPE_2MB) {
2711                 VM_BUG_ON(khugepaged_scan.address & ~HPAGE_PMD_MASK);
2712                 hpage_size = HPAGE_PMD_SIZE; /* 2MB */
2713                 hpage_nr = HPAGE_PMD_NR;
2714                 if (IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS) && vma->vm_file &&
2715                     !IS_ALIGNED((vma->vm_start >> PAGE_SHIFT) - vma->vm_pgoff,
2716                                 HPAGE_PMD_NR)) {
2717                         /* fallback, vma or file not aligned to 2MB */
2718                         hpage_size = HPAGE_CONT_PTE_SIZE; /* 64KB */
2719                         hpage_nr = HPAGE_CONT_PTE_NR;
2720                         hpage_type = THP_TYPE_64KB;
2721                 }
2722         } else
2723                 BUG();
2724
2725         while (khugepaged_scan.address < hend) {
2726                 if (khugepaged_scan.address + hpage_size >= hend) {
2727                         if (khugepaged_scan.address + HPAGE_CONT_PTE_SIZE < hend) {
2728                                 hpage_size = HPAGE_CONT_PTE_SIZE;
2729                                 hpage_nr = HPAGE_CONT_PTE_NR;
2730                                 hpage_type = THP_TYPE_64KB;
2731                         }
2732                 }
2733                 ret = 0;
2734                 cond_resched();
2735                 if (unlikely(khugepaged_test_exit(mm)))
2736                         return KHUGEPAGE_SCAN_BREAK;
2737
2738                 VM_BUG_ON(khugepaged_scan.address < hstart ||
2739                                 khugepaged_scan.address + hpage_size >
2740                                 hend);
2741                 if (IS_ENABLED(CONFIG_SHMEM) && vma->vm_file) {
2742                         struct file *file = get_file(vma->vm_file);
2743                         pgoff_t pgoff = linear_page_index(vma,
2744                                         khugepaged_scan.address);
2745
2746                         mmap_read_unlock(mm);
2747                         ret = 1;
2748                         khugepaged_scan_file(mm, file, pgoff, hpage, hpage_type);
2749                         fput(file);
2750                 } else {
2751                         ret = khugepaged_scan_pmd(mm, vma,
2752                                         khugepaged_scan.address,
2753                                         hpage, hpage_type);
2754                 }
2755                 /* move to next address */
2756                 khugepaged_scan.address += hpage_size;
2757                 *progress += hpage_nr;
2758                 if (ret)
2759                         /* we released mmap_lock so break the loop */
2760                         return KHUGEPAGE_SCAN_BREAK_MMAP_LOCK;
2761                 if (*progress >= pages)
2762                         return KHUGEPAGE_SCAN_BREAK;
2763         }
2764         return KHUGEPAGE_SCAN_CONTINUE;
2765 }
2766
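     /*
      * find_scan_hint - return the pending scan hint attached to @slot, or
      * NULL if none is queued.  The hint list is protected by
      * khugepaged_mm_lock.  @addr is currently unused: hints are tracked
      * per mm_slot, not per address.
      */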
2767 static struct thp_scan_hint *find_scan_hint(struct mm_slot *slot,
2768                                                                 unsigned long addr)
2769 {
2770         struct thp_scan_hint *hint;
2771
2772         list_for_each_entry(hint, &khugepaged_scan.hint_list, hint_list) {
2773                 if (hint->slot == slot)
2774                         return hint;
2775         }
2776         return NULL;
2777 }
2778
2779 #ifdef CONFIG_THP_CONSERVATIVE
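     /*
      * Record that @diff bytes of mapped memory changed near @addr in @mm
      * and fold it into this mm's scan hint, creating the hint (and the
      * mm_slot) if needed.  Once a hint accumulates at least one 64KB
      * hugepage worth of change, khugepaged is woken to rescan.  @debug is
      * only a caller label and is not used here.
      */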
2780 /* caller must hold a proper mmap_lock */
2781 void khugepaged_mem_hook(struct mm_struct *mm, unsigned long addr,
2782                 long diff, const char *debug)
2783 {
2784         struct mm_slot *slot;
2785         struct vm_area_struct *vma;
2786         struct thp_scan_hint *hint;
2787         bool wakeup = false;
2788         bool retry = false;
2789
2790         vma = find_vma(mm, addr);
2791         if (!vma || !hugepage_vma_check(vma, vma->vm_flags))
2792                 return;
2793
2794 again:
2795         spin_lock(&khugepaged_mm_lock);
2796         slot = get_mm_slot(mm);
2797         if (!slot) {
2798                 /* make a new slot or go out */
2799                 spin_unlock(&khugepaged_mm_lock);
2800                 if (retry)
2801                         return;
2802                 if (__khugepaged_enter(mm))
2803                         return;
2804                 retry = true;
2805                 goto again;
2806         }
2807
2808         hint = find_scan_hint(slot, addr);
2809         if (!hint) {
2810                 spin_unlock(&khugepaged_mm_lock);
2811                 hint = kzalloc(sizeof(struct thp_scan_hint), GFP_KERNEL);
                     if (!hint)
                             return;
2812                 hint->vma = vma;
2813                 hint->slot = slot;
2814                 hint->diff = 0;
2815                 hint->jiffies = jiffies;
2816                 spin_lock(&khugepaged_mm_lock);
2817                 list_add(&hint->hint_list, &khugepaged_scan.hint_list);
2818                 khugepaged_scan.nr_hint++;
2819         }
2820         hint->diff += diff;
2821         if (hint->diff >= HPAGE_CONT_PTE_SIZE) {
2822                 wakeup = true;
2823                 /* list_move(&hint->hint_list, &khugepaged_scan.hint_list); */
2824         }
2825         spin_unlock(&khugepaged_mm_lock);
2826
2827         /* if possible, wake khugepaged up for starting a scan */
2828         if (wakeup) {
2829                 wake_up_interruptible(&khugepaged_wait);
2830         }
2831 }
2832 #else /* CONFIG_THP_CONSERVATIVE */
2833 void khugepaged_mem_hook(struct mm_struct *mm,
2834                         unsigned long addr, long diff, const char *debug)
2835 {}
2836 #endif /* CONFIG_THP_CONSERVATIVE */
2837
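     /*
      * Drop the pending scan hint, if any, attached to @slot.  Called with
      * khugepaged_mm_lock held.
      */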
2838 static void clear_hint_list(struct mm_slot *slot)
2839 {
2840         struct thp_scan_hint *hint = find_scan_hint(slot, 0);
2841
2842         if (hint) {
2843                 list_del(&hint->hint_list);
2844                 kfree(hint);
2845                 khugepaged_scan.nr_hint--;
2846         }
2847 }
2848
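     /*
      * Detach and return the first hint on the scan hint list, or NULL if
      * the list is empty.  The caller owns the returned hint and must
      * kfree() it.
      */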
2849 static struct thp_scan_hint *get_next_hint(void)
2850 {
2851         if (!list_empty(&khugepaged_scan.hint_list)) {
2852                 struct thp_scan_hint *hint = list_first_entry(
2853                                         &khugepaged_scan.hint_list,
2854                                         struct thp_scan_hint, hint_list);
2855                 list_del(&hint->hint_list);
2856                 khugepaged_scan.nr_hint--;
2857                 return hint;
2858         }
2859         return NULL;
2860 }
2861 #endif /* CONFIG_FINEGRAINED_THP */
2862
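     /*
      * Pick the mm to scan.  With CONFIG_FINEGRAINED_THP, when no scan is
      * in progress the mm named by the next scan hint is preferred;
      * otherwise scanning resumes at the current slot or falls back to
      * round-robin over the mm_head list.
      */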
2863 static unsigned int khugepaged_scan_mm_slot(unsigned int pages,
2864                                             struct page **hpage)
2865         __releases(&khugepaged_mm_lock)
2866         __acquires(&khugepaged_mm_lock)
2867 {
2868         struct mm_slot *mm_slot;
2869         struct mm_struct *mm;
2870         struct vm_area_struct *vma;
2871         int progress = 0;
2872
2873         VM_BUG_ON(!pages);
2874         lockdep_assert_held(&khugepaged_mm_lock);
2875
2876 #ifdef CONFIG_FINEGRAINED_THP
2877         if (khugepaged_scan.mm_slot)
2878                 mm_slot = khugepaged_scan.mm_slot;
2879         else if (!list_empty(&khugepaged_scan.hint_list)) {
2880                 struct thp_scan_hint *hint;
2881                 long mem_diff;
2882                 unsigned long jiffies_diff;
2883
2884 get_next_hint:
2885                 hint = get_next_hint();
2886                 if (!hint)
2887                         goto get_next_slot;
2888
2889                 mm_slot = hint->slot;
2890                 mem_diff = hint->diff;
2891                 jiffies_diff = jiffies - hint->jiffies;
2892                 kfree(hint);
2893                 clear_hint_list(mm_slot);
2894
2895                 if (khugepaged_test_exit(mm_slot->mm))
2896                         goto get_next_hint;
2897                 khugepaged_scan.address = 0;
2898                 khugepaged_scan.mm_slot = mm_slot;
2899         } else {
2900 get_next_slot:
2901                 mm_slot = list_entry(khugepaged_scan.mm_head.next,
2902                                      struct mm_slot, mm_node);
2903                 clear_hint_list(mm_slot);
2904                 khugepaged_scan.address = 0;
2905                 khugepaged_scan.mm_slot = mm_slot;
2906         }
2907 #else /* CONFIG_FINEGRAINED_THP */
2908         if (khugepaged_scan.mm_slot)
2909                 mm_slot = khugepaged_scan.mm_slot;
2910         else {
2911                 mm_slot = list_entry(khugepaged_scan.mm_head.next,
2912                                      struct mm_slot, mm_node);
2913                 khugepaged_scan.address = 0;
2914                 khugepaged_scan.mm_slot = mm_slot;
2915         }
2916 #endif /* CONFIG_FINEGRAINED_THP */
2917         spin_unlock(&khugepaged_mm_lock);
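             /*
              * Collapse any pte-mapped hugepages that were recorded for this
              * mm before scanning it further.
              */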
2918         khugepaged_collapse_pte_mapped_thps(mm_slot);
2919
2920         mm = mm_slot->mm;
2921         /*
2922          * Don't wait for the mmap_lock (to avoid long wait times).  Just move to
2923          * the next mm on the list.
2924          */
2925         vma = NULL;
2926         if (unlikely(!mmap_read_trylock(mm)))
2927                 goto breakouterloop_mmap_lock;
2928         if (likely(!khugepaged_test_exit(mm)))
2929                 vma = find_vma(mm, khugepaged_scan.address);
2930
2931         progress++;
2932         for (; vma; vma = vma->vm_next) {
2933 #ifdef CONFIG_FINEGRAINED_THP
2934                 int ret;
2935 #else
2936                 unsigned long hstart, hend;
2937 #endif
2938
2939                 cond_resched();
2940                 if (unlikely(khugepaged_test_exit(mm))) {
2941                         progress++;
2942                         break;
2943                 }
2944 #ifdef CONFIG_FINEGRAINED_THP
2945                 ret = khugepaged_scan_vma(mm, vma, hpage, pages, &progress);
2946
2947                 if (ret == KHUGEPAGE_SCAN_CONTINUE) {
2948                         progress++;
2949                         continue;
2950                 } else if (ret == KHUGEPAGE_SCAN_BREAK)
2951                         goto breakouterloop;
2952                 else if (ret == KHUGEPAGE_SCAN_BREAK_MMAP_LOCK)
2953                         goto breakouterloop_mmap_lock;
2954 #else /* CONFIG_FINEGRAINED_THP */
2955                 if (!hugepage_vma_check(vma, vma->vm_flags)) {
2956 skip:
2957                         progress++;
2958                         continue;
2959                 }
2960                 hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
2961                 hend = vma->vm_end & HPAGE_PMD_MASK;
2962                 if (hstart >= hend)
2963                         goto skip;
2964                 if (khugepaged_scan.address > hend)
2965                         goto skip;
2966                 if (khugepaged_scan.address < hstart)
2967                         khugepaged_scan.address = hstart;
2968                 VM_BUG_ON(khugepaged_scan.address & ~HPAGE_PMD_MASK);
2969                 if (shmem_file(vma->vm_file) && !shmem_huge_enabled(vma))
2970                         goto skip;
2971
2972                 while (khugepaged_scan.address < hend) {
2973                         int ret;
2974                         cond_resched();
2975                         if (unlikely(khugepaged_test_exit(mm)))
2976                                 goto breakouterloop;
2977
2978                         VM_BUG_ON(khugepaged_scan.address < hstart ||
2979                                   khugepaged_scan.address + HPAGE_PMD_SIZE >
2980                                   hend);
2981                         if (IS_ENABLED(CONFIG_SHMEM) && vma->vm_file) {
2982                                 struct file *file = get_file(vma->vm_file);
2983                                 pgoff_t pgoff = linear_page_index(vma,
2984                                                 khugepaged_scan.address);
2985
2986                                 mmap_read_unlock(mm);
2987                                 ret = 1;
2988                                 khugepaged_scan_file(mm, file, pgoff, hpage);
2989                                 fput(file);
2990                         } else {
2991                                 ret = khugepaged_scan_pmd(mm, vma,
2992                                                 khugepaged_scan.address,
2993                                                 hpage);
2994                         }
2995                         /* move to next address */
2996                         khugepaged_scan.address += HPAGE_PMD_SIZE;
2997                         progress += HPAGE_PMD_NR;
2998                         if (ret)
2999                                 /* we released mmap_lock so break loop */
3000                                 goto breakouterloop_mmap_lock;
3001                         if (progress >= pages)
3002                                 goto breakouterloop;
3003                 }
3004 #endif /* CONFIG_FINEGRAINED_THP */
3005         }
3006 breakouterloop:
3007         mmap_read_unlock(mm); /* exit_mmap will destroy ptes after this */
3008 breakouterloop_mmap_lock:
3009
3010         spin_lock(&khugepaged_mm_lock);
3011         VM_BUG_ON(khugepaged_scan.mm_slot != mm_slot);
3012         /*
3013          * Release the current mm_slot if this mm is about to die, or
3014          * if we scanned all vmas of this mm.
3015          */
3016         if (khugepaged_test_exit(mm) || !vma) {
3017                 /*
3018                  * Make sure that if mm_users is reaching zero while
3019                  * khugepaged runs here, khugepaged_exit will find
3020                  * mm_slot not pointing to the exiting mm.
3021                  */
3022 #ifdef CONFIG_FINEGRAINED_THP
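                     /*
                      * Prefer the mm named by the next pending hint over plain
                      * list order; skip hints that refer to the slot we just
                      * finished or to an exiting mm.
                      */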
3023                 if (!list_empty(&khugepaged_scan.hint_list)) {
3024                         unsigned long jiffies_diff;
3025                         long mem_diff;
3026                         struct thp_scan_hint *hint;
3027                         struct mm_slot *next_slot;
3028
3029 get_next_hint2:
3030                         hint = get_next_hint();
3031
3032                         if (!hint) {
3033                                 /* no more hint */
3034                                 if (mm_slot->mm_node.next != &khugepaged_scan.mm_head)
3035                                         goto get_next_slot2;
3036                                 else
3037                                         goto end_loop;
3038                         }
3039
3040                         mem_diff = hint->diff;
3041                         jiffies_diff = jiffies - hint->jiffies;
3042                         next_slot = hint->slot;
3043                         kfree(hint);
3044
3045                         if (next_slot == mm_slot)
3046                                 goto get_next_hint2;
3047
3048                         if (!khugepaged_test_exit(next_slot->mm)) {
3049                                 list_move(&next_slot->mm_node, &mm_slot->mm_node);
3050                                 clear_hint_list(next_slot);
3051                         } else
3052                                 goto get_next_hint2;
3053
3054                         khugepaged_scan.mm_slot = next_slot;
3055                         khugepaged_scan.address = 0;
3056                 } else if (mm_slot->mm_node.next != &khugepaged_scan.mm_head) {
3057 get_next_slot2:
3058                         khugepaged_scan.mm_slot = list_entry(
3059                                 mm_slot->mm_node.next,
3060                                 struct mm_slot, mm_node);
3061                         clear_hint_list(khugepaged_scan.mm_slot);
3062                         khugepaged_scan.address = 0;
3063                 } else {
3064 end_loop:
3065                         khugepaged_scan.mm_slot = NULL;
3066                         khugepaged_full_scans++;
3067                 }
3068 #else /* CONFIG_FINEGRAINED_THP */
3069                 if (mm_slot->mm_node.next != &khugepaged_scan.mm_head) {
3070                         khugepaged_scan.mm_slot = list_entry(
3071                                 mm_slot->mm_node.next,
3072                                 struct mm_slot, mm_node);
3073                         khugepaged_scan.address = 0;
3074                 } else {
3075                         khugepaged_scan.mm_slot = NULL;
3076                         khugepaged_full_scans++;
3077                 }
3078 #endif /* CONFIG_FINEGRAINED_THP */
3079                 collect_mm_slot(mm_slot);
3080         }
3081
3082         return progress;
3083 }
3084
3085 static int khugepaged_has_work(void)
3086 {
3087         return !list_empty(&khugepaged_scan.mm_head) &&
3088                 khugepaged_enabled();
3089 }
3090
3091 static int khugepaged_wait_event(void)
3092 {
3093         return !list_empty(&khugepaged_scan.mm_head) ||
3094                 kthread_should_stop();
3095 }
3096
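     /*
      * One scan pass: keep calling khugepaged_scan_mm_slot() until roughly
      * khugepaged_pages_to_scan pages have been examined, the thread is
      * asked to stop, or hugepage preallocation fails.
      */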
3097 static void khugepaged_do_scan(void)
3098 {
3099         struct page *hpage = NULL;
3100         unsigned int progress = 0, pass_through_head = 0;
3101         unsigned int pages = khugepaged_pages_to_scan;
3102         bool wait = true;
3103
3104         barrier(); /* write khugepaged_pages_to_scan to local stack */
3105
3106         lru_add_drain_all();
3107
3108         while (progress < pages) {
3109                 if (!khugepaged_prealloc_page(&hpage, &wait))
3110                         break;
3111
3112                 cond_resched();
3113
3114                 if (unlikely(kthread_should_stop() || try_to_freeze()))
3115                         break;
3116
3117                 spin_lock(&khugepaged_mm_lock);
3118                 if (!khugepaged_scan.mm_slot)
3119                         pass_through_head++;
3120                 if (khugepaged_has_work() &&
3121                     pass_through_head < 2)
3122                         progress += khugepaged_scan_mm_slot(pages - progress,
3123                                                             &hpage);
3124                 else
3125                         progress = pages;
3126                 spin_unlock(&khugepaged_mm_lock);
3127         }
3128
3129         if (!IS_ERR_OR_NULL(hpage))
3130                 put_page(hpage);
3131 }
3132
3133 static bool khugepaged_should_wakeup(void)
3134 {
3135         return kthread_should_stop() ||
3136                time_after_eq(jiffies, khugepaged_sleep_expire);
3137 }
3138
3139 static void khugepaged_wait_work(void)
3140 {
3141         if (khugepaged_has_work()) {
3142                 const unsigned long scan_sleep_jiffies =
3143                         msecs_to_jiffies(khugepaged_scan_sleep_millisecs);
3144
3145                 if (!scan_sleep_jiffies)
3146                         return;
3147
3148                 khugepaged_sleep_expire = jiffies + scan_sleep_jiffies;
3149                 wait_event_freezable_timeout(khugepaged_wait,
3150                                              khugepaged_should_wakeup(),
3151                                              scan_sleep_jiffies);
3152                 return;
3153         }
3154
3155         if (khugepaged_enabled())
3156                 wait_event_freezable(khugepaged_wait, khugepaged_wait_event());
3157 }
3158
3159 #include <linux/delay.h>
3160 bool eager_allocation = false;
3161
3162 static int khugepaged(void *none)
3163 {
3164         struct mm_slot *mm_slot;
3165
3166         set_freezable();
3167         set_user_nice(current, MAX_NICE);
3168
3169         while (!kthread_should_stop()) {
3170                 khugepaged_do_scan();
3171                 khugepaged_wait_work();
3172         }
3173
3174         spin_lock(&khugepaged_mm_lock);
3175         mm_slot = khugepaged_scan.mm_slot;
3176         khugepaged_scan.mm_slot = NULL;
3177         if (mm_slot)
3178                 collect_mm_slot(mm_slot);
3179         spin_unlock(&khugepaged_mm_lock);
3180         return 0;
3181 }
3182
3183 static void set_recommended_min_free_kbytes(void)
3184 {
3185         struct zone *zone;
3186         int nr_zones = 0;
3187         unsigned long recommended_min;
3188
3189         for_each_populated_zone(zone) {
3190                 /*
3191                  * We don't need to worry about fragmentation of
3192                  * ZONE_MOVABLE since it only has movable pages.
3193                  */
3194                 if (zone_idx(zone) > gfp_zone(GFP_USER))
3195                         continue;
3196
3197                 nr_zones++;
3198         }
3199
3200         /* Ensure 2 pageblocks are free to assist fragmentation avoidance */
3201         recommended_min = pageblock_nr_pages * nr_zones * 2;
3202
3203         /*
3204          * Make sure that on average at least two pageblocks are almost free
3205          * of another type, one for a migratetype to fall back to and a
3206          * second to avoid subsequent fallbacks of other types. There are 3
3207          * MIGRATE_TYPES we care about.
3208          */
3209         recommended_min += pageblock_nr_pages * nr_zones *
3210                            MIGRATE_PCPTYPES * MIGRATE_PCPTYPES;
3211
3212         /* never allow reserving more than 5% of lowmem */
3213         recommended_min = min(recommended_min,
3214                               (unsigned long) nr_free_buffer_pages() / 20);
3215         recommended_min <<= (PAGE_SHIFT-10);
3216
3217         if (recommended_min > min_free_kbytes) {
3218                 if (user_min_free_kbytes >= 0)
3219                         pr_info("raising min_free_kbytes from %d to %lu to help transparent hugepage allocations\n",
3220                                 min_free_kbytes, recommended_min);
3221
3222                 min_free_kbytes = recommended_min;
3223         }
3224         setup_per_zone_wmarks();
3225 }
3226
3227 int start_stop_khugepaged(void)
3228 {
3229         int err = 0;
3230
3231         mutex_lock(&khugepaged_mutex);
3232         if (khugepaged_enabled()) {
3233                 if (!khugepaged_thread)
3234                         khugepaged_thread = kthread_run(khugepaged, NULL,
3235                                                         "khugepaged");
3236                 if (IS_ERR(khugepaged_thread)) {
3237                         pr_err("khugepaged: kthread_run(khugepaged) failed\n");
3238                         err = PTR_ERR(khugepaged_thread);
3239                         khugepaged_thread = NULL;
3240                         goto fail;
3241                 }
3242
3243                 if (!list_empty(&khugepaged_scan.mm_head))
3244                         wake_up_interruptible(&khugepaged_wait);
3245
3246                 set_recommended_min_free_kbytes();
3247         } else if (khugepaged_thread) {
3248                 kthread_stop(khugepaged_thread);
3249                 khugepaged_thread = NULL;
3250         }
3251 fail:
3252         mutex_unlock(&khugepaged_mutex);
3253         return err;
3254 }
3255
3256 void khugepaged_min_free_kbytes_update(void)
3257 {
3258         mutex_lock(&khugepaged_mutex);
3259         if (khugepaged_enabled() && khugepaged_thread)
3260                 set_recommended_min_free_kbytes();
3261         mutex_unlock(&khugepaged_mutex);
3262 }