Merge "kfence: Use pt_regs to generate stack trace on faults" into tizen
[platform/kernel/linux-rpi.git] / mm / khugepaged.c
// SPDX-License-Identifier: GPL-2.0
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/coredump.h>
#include <linux/mmu_notifier.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/mm_inline.h>
#include <linux/kthread.h>
#include <linux/khugepaged.h>
#include <linux/freezer.h>
#include <linux/mman.h>
#include <linux/hashtable.h>
#include <linux/userfaultfd_k.h>
#include <linux/page_idle.h>
#include <linux/swapops.h>
#include <linux/shmem_fs.h>

#include <asm/tlb.h>
#include <asm/pgalloc.h>
#ifdef CONFIG_FINEGRAINED_THP
#include <asm/finegrained_thp.h>
#include <asm/huge_mm.h>
#else
#include <asm-generic/finegrained_thp.h>
#include <asm-generic/huge_mm.h>
#endif
#include "internal.h"

enum scan_result {
        SCAN_FAIL,
        SCAN_SUCCEED,
        SCAN_PMD_NULL,
        SCAN_EXCEED_NONE_PTE,
        SCAN_EXCEED_SWAP_PTE,
        SCAN_EXCEED_SHARED_PTE,
        SCAN_PTE_NON_PRESENT,
        SCAN_PTE_UFFD_WP,
        SCAN_PAGE_RO,
        SCAN_LACK_REFERENCED_PAGE,
        SCAN_PAGE_NULL,
        SCAN_SCAN_ABORT,
        SCAN_PAGE_COUNT,
        SCAN_PAGE_LRU,
        SCAN_PAGE_LOCK,
        SCAN_PAGE_ANON,
        SCAN_PAGE_COMPOUND,
        SCAN_ANY_PROCESS,
        SCAN_VMA_NULL,
        SCAN_VMA_CHECK,
        SCAN_ADDRESS_RANGE,
        SCAN_SWAP_CACHE_PAGE,
        SCAN_DEL_PAGE_LRU,
        SCAN_ALLOC_HUGE_PAGE_FAIL,
        SCAN_CGROUP_CHARGE_FAIL,
        SCAN_TRUNCATED,
        SCAN_PAGE_HAS_PRIVATE,
};

#define CREATE_TRACE_POINTS
#include <trace/events/huge_memory.h>

static struct task_struct *khugepaged_thread __read_mostly;
static DEFINE_MUTEX(khugepaged_mutex);

/* default: scan 8*512 ptes (or vmas) every 10 seconds */
static unsigned int khugepaged_pages_to_scan __read_mostly;
static unsigned int khugepaged_pages_collapsed;
static unsigned int khugepaged_full_scans;
static unsigned int khugepaged_scan_sleep_millisecs __read_mostly = 10000;
/* during fragmentation poll the hugepage allocator once every minute */
static unsigned int khugepaged_alloc_sleep_millisecs __read_mostly = 60000;
static unsigned long khugepaged_sleep_expire;
static DEFINE_SPINLOCK(khugepaged_mm_lock);
static DECLARE_WAIT_QUEUE_HEAD(khugepaged_wait);
/*
 * By default, collapse a hugepage when at least one pte is mapped,
 * mirroring what would have happened at page-fault time had the vma
 * been large enough.
 */
static unsigned int khugepaged_max_ptes_none __read_mostly;
static unsigned int khugepaged_max_ptes_swap __read_mostly;
static unsigned int khugepaged_max_ptes_shared __read_mostly;

#ifdef CONFIG_FINEGRAINED_THP
/*
 * struct thp_scan_hint - tells khugepaged which address space has
 * changed recently.
 */
struct thp_scan_hint {
        struct mm_slot *slot;
        struct vm_area_struct *vma;
        unsigned long diff;             /* memory difference */
        unsigned long jiffies;          /* time stamp for profiling purpose */
        struct list_head hint_list;
};

/* THP type descriptor */
enum {
        THP_TYPE_FAIL,  /* cannot make hugepage */
        THP_TYPE_64KB,  /* 64KB hugepage can be made, use CONT_PTE */
        THP_TYPE_2MB,   /* 2MB hugepage can be made, use PMD */
};

static unsigned int khugepaged_max_ptes_none_64kb __read_mostly;
static unsigned int khugepaged_max_ptes_swap_64kb __read_mostly;
static unsigned int khugepaged_max_ptes_shared_64kb __read_mostly;
#endif /* CONFIG_FINEGRAINED_THP */

#define MM_SLOTS_HASH_BITS 10
static __read_mostly DEFINE_HASHTABLE(mm_slots_hash, MM_SLOTS_HASH_BITS);

static struct kmem_cache *mm_slot_cache __read_mostly;

#define MAX_PTE_MAPPED_THP 8

/**
 * struct mm_slot - hash lookup from mm to mm_slot
 * @hash: hash collision list
 * @mm_node: khugepaged scan list headed in khugepaged_scan.mm_head
 * @mm: the mm that this information is valid for
 * @nr_pte_mapped_thp: number of pte-mapped THPs in this mm
 * @pte_mapped_thp: addresses of pte-mapped THPs
 */
struct mm_slot {
        struct hlist_node hash;
        struct list_head mm_node;
        struct mm_struct *mm;

        /* pte-mapped THP in this mm */
        int nr_pte_mapped_thp;
        unsigned long pte_mapped_thp[MAX_PTE_MAPPED_THP];
};

/**
 * struct khugepaged_scan - cursor for scanning
 * @mm_head: the head of the mm list to scan
 * @mm_slot: the current mm_slot we are scanning
 * @address: the next address inside that to be scanned
 *
 * There is only one khugepaged_scan instance of this cursor structure.
 */
struct khugepaged_scan {
        struct list_head mm_head;
        struct mm_slot *mm_slot;
        unsigned long address;
#ifdef CONFIG_FINEGRAINED_THP
        int hpage_type;
        int nr_hint;
        struct list_head hint_list;
#endif /* CONFIG_FINEGRAINED_THP */
};

static struct khugepaged_scan khugepaged_scan = {
        .mm_head = LIST_HEAD_INIT(khugepaged_scan.mm_head),
#ifdef CONFIG_FINEGRAINED_THP
        .hint_list = LIST_HEAD_INIT(khugepaged_scan.hint_list),
#endif
};

#ifdef CONFIG_SYSFS
static ssize_t scan_sleep_millisecs_show(struct kobject *kobj,
                                         struct kobj_attribute *attr,
                                         char *buf)
{
        return sprintf(buf, "%u\n", khugepaged_scan_sleep_millisecs);
}

static ssize_t scan_sleep_millisecs_store(struct kobject *kobj,
                                          struct kobj_attribute *attr,
                                          const char *buf, size_t count)
{
        unsigned long msecs;
        int err;

        err = kstrtoul(buf, 10, &msecs);
        if (err || msecs > UINT_MAX)
                return -EINVAL;

        khugepaged_scan_sleep_millisecs = msecs;
        khugepaged_sleep_expire = 0;
        wake_up_interruptible(&khugepaged_wait);

        return count;
}
static struct kobj_attribute scan_sleep_millisecs_attr =
        __ATTR(scan_sleep_millisecs, 0644, scan_sleep_millisecs_show,
               scan_sleep_millisecs_store);

static ssize_t alloc_sleep_millisecs_show(struct kobject *kobj,
                                          struct kobj_attribute *attr,
                                          char *buf)
{
        return sprintf(buf, "%u\n", khugepaged_alloc_sleep_millisecs);
}

static ssize_t alloc_sleep_millisecs_store(struct kobject *kobj,
                                           struct kobj_attribute *attr,
                                           const char *buf, size_t count)
{
        unsigned long msecs;
        int err;

        err = kstrtoul(buf, 10, &msecs);
        if (err || msecs > UINT_MAX)
                return -EINVAL;

        khugepaged_alloc_sleep_millisecs = msecs;
        khugepaged_sleep_expire = 0;
        wake_up_interruptible(&khugepaged_wait);

        return count;
}
static struct kobj_attribute alloc_sleep_millisecs_attr =
        __ATTR(alloc_sleep_millisecs, 0644, alloc_sleep_millisecs_show,
               alloc_sleep_millisecs_store);

static ssize_t pages_to_scan_show(struct kobject *kobj,
                                  struct kobj_attribute *attr,
                                  char *buf)
{
        return sprintf(buf, "%u\n", khugepaged_pages_to_scan);
}
static ssize_t pages_to_scan_store(struct kobject *kobj,
                                   struct kobj_attribute *attr,
                                   const char *buf, size_t count)
{
        int err;
        unsigned long pages;

        err = kstrtoul(buf, 10, &pages);
        if (err || !pages || pages > UINT_MAX)
                return -EINVAL;

        khugepaged_pages_to_scan = pages;

        return count;
}
static struct kobj_attribute pages_to_scan_attr =
        __ATTR(pages_to_scan, 0644, pages_to_scan_show,
               pages_to_scan_store);

static ssize_t pages_collapsed_show(struct kobject *kobj,
                                    struct kobj_attribute *attr,
                                    char *buf)
{
        return sprintf(buf, "%u\n", khugepaged_pages_collapsed);
}
static struct kobj_attribute pages_collapsed_attr =
        __ATTR_RO(pages_collapsed);

static ssize_t full_scans_show(struct kobject *kobj,
                               struct kobj_attribute *attr,
                               char *buf)
{
        return sprintf(buf, "%u\n", khugepaged_full_scans);
}
static struct kobj_attribute full_scans_attr =
        __ATTR_RO(full_scans);

static ssize_t khugepaged_defrag_show(struct kobject *kobj,
                                      struct kobj_attribute *attr, char *buf)
{
        return single_hugepage_flag_show(kobj, attr, buf,
                                TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
}
static ssize_t khugepaged_defrag_store(struct kobject *kobj,
                                       struct kobj_attribute *attr,
                                       const char *buf, size_t count)
{
        return single_hugepage_flag_store(kobj, attr, buf, count,
                                 TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
}
static struct kobj_attribute khugepaged_defrag_attr =
        __ATTR(defrag, 0644, khugepaged_defrag_show,
               khugepaged_defrag_store);

/*
 * max_ptes_none controls whether khugepaged should collapse hugepages
 * over unmapped ptes, potentially increasing the memory footprint of
 * the vmas. When max_ptes_none is 0, khugepaged will not reduce the
 * available free memory in the system as it runs. Increasing
 * max_ptes_none instead potentially reduces free memory during the
 * khugepaged scan.
 */
static ssize_t khugepaged_max_ptes_none_show(struct kobject *kobj,
                                             struct kobj_attribute *attr,
                                             char *buf)
{
        return sprintf(buf, "%u\n", khugepaged_max_ptes_none);
}
static ssize_t khugepaged_max_ptes_none_store(struct kobject *kobj,
                                              struct kobj_attribute *attr,
                                              const char *buf, size_t count)
{
        int err;
        unsigned long max_ptes_none;

        err = kstrtoul(buf, 10, &max_ptes_none);
        if (err || max_ptes_none > HPAGE_PMD_NR - 1)
                return -EINVAL;

        khugepaged_max_ptes_none = max_ptes_none;

        return count;
}
static struct kobj_attribute khugepaged_max_ptes_none_attr =
        __ATTR(max_ptes_none, 0644, khugepaged_max_ptes_none_show,
               khugepaged_max_ptes_none_store);

static ssize_t khugepaged_max_ptes_swap_show(struct kobject *kobj,
                                             struct kobj_attribute *attr,
                                             char *buf)
{
        return sprintf(buf, "%u\n", khugepaged_max_ptes_swap);
}

static ssize_t khugepaged_max_ptes_swap_store(struct kobject *kobj,
                                              struct kobj_attribute *attr,
                                              const char *buf, size_t count)
{
        int err;
        unsigned long max_ptes_swap;

        err = kstrtoul(buf, 10, &max_ptes_swap);
        if (err || max_ptes_swap > HPAGE_PMD_NR - 1)
                return -EINVAL;

        khugepaged_max_ptes_swap = max_ptes_swap;

        return count;
}

static struct kobj_attribute khugepaged_max_ptes_swap_attr =
        __ATTR(max_ptes_swap, 0644, khugepaged_max_ptes_swap_show,
               khugepaged_max_ptes_swap_store);

static ssize_t khugepaged_max_ptes_shared_show(struct kobject *kobj,
                                               struct kobj_attribute *attr,
                                               char *buf)
{
        return sprintf(buf, "%u\n", khugepaged_max_ptes_shared);
}

static ssize_t khugepaged_max_ptes_shared_store(struct kobject *kobj,
                                                struct kobj_attribute *attr,
                                                const char *buf, size_t count)
{
        int err;
        unsigned long max_ptes_shared;

        err = kstrtoul(buf, 10, &max_ptes_shared);
        if (err || max_ptes_shared > HPAGE_PMD_NR - 1)
                return -EINVAL;

        khugepaged_max_ptes_shared = max_ptes_shared;

        return count;
}

static struct kobj_attribute khugepaged_max_ptes_shared_attr =
        __ATTR(max_ptes_shared, 0644, khugepaged_max_ptes_shared_show,
               khugepaged_max_ptes_shared_store);

static struct attribute *khugepaged_attr[] = {
        &khugepaged_defrag_attr.attr,
        &khugepaged_max_ptes_none_attr.attr,
        &khugepaged_max_ptes_swap_attr.attr,
        &khugepaged_max_ptes_shared_attr.attr,
        &pages_to_scan_attr.attr,
        &pages_collapsed_attr.attr,
        &full_scans_attr.attr,
        &scan_sleep_millisecs_attr.attr,
        &alloc_sleep_millisecs_attr.attr,
        NULL,
};

struct attribute_group khugepaged_attr_group = {
        .attrs = khugepaged_attr,
        .name = "khugepaged",
};
#endif /* CONFIG_SYSFS */

int hugepage_madvise(struct vm_area_struct *vma,
                     unsigned long *vm_flags, int advice)
{
        switch (advice) {
        case MADV_HUGEPAGE:
#ifdef CONFIG_S390
                /*
                 * qemu blindly sets MADV_HUGEPAGE on all allocations, but s390
                 * can't handle this properly after s390_enable_sie, so we simply
                 * ignore the madvise to prevent qemu from causing a SIGSEGV.
                 */
                if (mm_has_pgste(vma->vm_mm))
                        return 0;
#endif
                *vm_flags &= ~VM_NOHUGEPAGE;
                *vm_flags |= VM_HUGEPAGE;
                /*
                 * If the vma becomes eligible for khugepaged to scan,
                 * register it here without waiting for a page fault that
                 * may not happen any time soon.
                 */
                if (!(*vm_flags & VM_NO_KHUGEPAGED) &&
                                khugepaged_enter_vma_merge(vma, *vm_flags))
                        return -ENOMEM;
                break;
        case MADV_NOHUGEPAGE:
                *vm_flags &= ~VM_HUGEPAGE;
                *vm_flags |= VM_NOHUGEPAGE;
                /*
                 * Setting VM_NOHUGEPAGE will prevent khugepaged from scanning
                 * this vma, even if the mm stays registered in khugepaged (it
                 * may have been registered before VM_NOHUGEPAGE was set).
                 */
                break;
        }

        return 0;
}

int __init khugepaged_init(void)
{
        mm_slot_cache = kmem_cache_create("khugepaged_mm_slot",
                                          sizeof(struct mm_slot),
                                          __alignof__(struct mm_slot), 0, NULL);
        if (!mm_slot_cache)
                return -ENOMEM;

        khugepaged_pages_to_scan = HPAGE_PMD_NR * 8;
        khugepaged_max_ptes_none = HPAGE_PMD_NR - 1;
        khugepaged_max_ptes_swap = HPAGE_PMD_NR / 8;
        khugepaged_max_ptes_shared = HPAGE_PMD_NR / 2;

#ifdef CONFIG_FINEGRAINED_THP
        khugepaged_max_ptes_none_64kb = HPAGE_CONT_PTE_NR - 1;
        khugepaged_max_ptes_swap_64kb = HPAGE_CONT_PTE_NR / 8;
        khugepaged_max_ptes_shared_64kb = HPAGE_CONT_PTE_NR / 2;
#endif
        return 0;
}

void __init khugepaged_destroy(void)
{
        kmem_cache_destroy(mm_slot_cache);
}

static inline struct mm_slot *alloc_mm_slot(void)
{
        if (!mm_slot_cache)     /* initialization failed */
                return NULL;
        return kmem_cache_zalloc(mm_slot_cache, GFP_KERNEL);
}

static inline void free_mm_slot(struct mm_slot *mm_slot)
{
        kmem_cache_free(mm_slot_cache, mm_slot);
}

static struct mm_slot *get_mm_slot(struct mm_struct *mm)
{
        struct mm_slot *mm_slot;

        hash_for_each_possible(mm_slots_hash, mm_slot, hash, (unsigned long)mm)
                if (mm == mm_slot->mm)
                        return mm_slot;

        return NULL;
}

static void insert_to_mm_slots_hash(struct mm_struct *mm,
                                    struct mm_slot *mm_slot)
{
        mm_slot->mm = mm;
        hash_add(mm_slots_hash, &mm_slot->hash, (unsigned long)mm);
}

static inline int khugepaged_test_exit(struct mm_struct *mm)
{
        return atomic_read(&mm->mm_users) == 0;
}

#ifdef CONFIG_FINEGRAINED_THP
static void clear_hint_list(struct mm_slot *slot);
#endif /* CONFIG_FINEGRAINED_THP */

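/*
 * hugepage_vma_check() - check whether @vma is a candidate for collapse.
 *
 * Returns true when THP is enabled for the vma and the vma is one of the
 * supported kinds: a suitably aligned file mapping, a shmem mapping with
 * huge pages enabled, a read-only file under CONFIG_READ_ONLY_THP_FOR_FS,
 * or a plain anonymous mapping that is not a temporary stack.
 */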
static bool hugepage_vma_check(struct vm_area_struct *vma,
                               unsigned long vm_flags)
{
        if (!transhuge_vma_enabled(vma, vm_flags))
                return false;

        if (vma->vm_file && !IS_ALIGNED((vma->vm_start >> PAGE_SHIFT) -
                                vma->vm_pgoff, HPAGE_PMD_NR))
                return false;

        /* Check arch-dependent availability of shmem hugepages */
        if (arch_hugepage_vma_shmem_check(vma, vm_flags))
                return true;
        /* Enabled via shmem mount options or sysfs settings. */
        if (shmem_file(vma->vm_file))
                return shmem_huge_enabled(vma);

        /* THP settings require madvise. */
        if (!(vm_flags & VM_HUGEPAGE) && !khugepaged_always())
                return false;

        /* Check arch-dependent availability of file hugepages */
        if (arch_hugepage_vma_file_check(vma, vm_flags))
                return true;
        /* Only regular files are valid */
        else if (IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS) && vma->vm_file &&
            (vm_flags & VM_DENYWRITE)) {
                struct inode *inode = vma->vm_file->f_inode;

                return S_ISREG(inode->i_mode);
        }

        if (!vma->anon_vma || vma->vm_ops)
                return false;
        if (vma_is_temporary_stack(vma))
                return false;
        return !(vm_flags & VM_NO_KHUGEPAGED);
}

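/*
 * Register @mm with khugepaged: allocate an mm_slot, set MMF_VM_HUGEPAGE,
 * add the slot to the scan list, and wake the daemon if the list was
 * empty. A reference on @mm is held (mmgrab) until the slot is released.
 */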
int __khugepaged_enter(struct mm_struct *mm)
{
        struct mm_slot *mm_slot;
        int wakeup;

        mm_slot = alloc_mm_slot();
        if (!mm_slot)
                return -ENOMEM;

        /* __khugepaged_exit() must not run from under us */
        VM_BUG_ON_MM(atomic_read(&mm->mm_users) == 0, mm);
        if (unlikely(test_and_set_bit(MMF_VM_HUGEPAGE, &mm->flags))) {
                free_mm_slot(mm_slot);
                return 0;
        }

        spin_lock(&khugepaged_mm_lock);
        insert_to_mm_slots_hash(mm, mm_slot);
        /*
         * Insert just behind the scanning cursor, to let the area settle
         * down a little.
         */
        wakeup = list_empty(&khugepaged_scan.mm_head);
        list_add_tail(&mm_slot->mm_node, &khugepaged_scan.mm_head);
        spin_unlock(&khugepaged_mm_lock);

        mmgrab(mm);
        if (wakeup)
                wake_up_interruptible(&khugepaged_wait);

        return 0;
}

int khugepaged_enter_vma_merge(struct vm_area_struct *vma,
                               unsigned long vm_flags)
{
        unsigned long hstart, hend;

        /*
         * For non-shmem files, khugepaged only supports read-only
         * mappings. It does not yet work on special mappings, and
         * file-private shmem THP is not supported.
         */
        if (!hugepage_vma_check(vma, vm_flags))
                return 0;

        hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
        hend = vma->vm_end & HPAGE_PMD_MASK;
        if (hstart < hend)
                return khugepaged_enter(vma, vm_flags);
#ifdef CONFIG_FINEGRAINED_THP
        hstart = (vma->vm_start + ~HPAGE_CONT_PTE_MASK) & HPAGE_CONT_PTE_MASK;
        hend = vma->vm_end & HPAGE_CONT_PTE_MASK;
        if (hstart < hend)
                return khugepaged_enter(vma, vm_flags);
#endif /* CONFIG_FINEGRAINED_THP */
        return 0;
}

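/*
 * Unregister @mm on process exit. If khugepaged is not currently
 * scanning this slot, free it immediately; otherwise briefly take
 * mmap_write_lock so that the scanner is guaranteed to have finished
 * with this mm first.
 */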
void __khugepaged_exit(struct mm_struct *mm)
{
        struct mm_slot *mm_slot;
        int free = 0;

        spin_lock(&khugepaged_mm_lock);
        mm_slot = get_mm_slot(mm);
        if (mm_slot && khugepaged_scan.mm_slot != mm_slot) {
#ifdef CONFIG_FINEGRAINED_THP
                clear_hint_list(mm_slot);
#endif
                hash_del(&mm_slot->hash);
                list_del(&mm_slot->mm_node);
                free = 1;
        }
        spin_unlock(&khugepaged_mm_lock);

        if (free) {
                clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
                free_mm_slot(mm_slot);
                mmdrop(mm);
        } else if (mm_slot) {
                /*
                 * This is required to serialize against
                 * khugepaged_test_exit() (which is guaranteed to run
                 * under mmap_lock read mode). Stop here (after we return
                 * all pagetables will be destroyed) until khugepaged has
                 * finished working on the pagetables under the mmap_lock.
                 */
                mmap_write_lock(mm);
                mmap_write_unlock(mm);
        }
}

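/*
 * Put an isolated page back: decrement the NR_ISOLATED counter it was
 * accounted under, unlock it, and return it to the LRU.
 */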
static void release_pte_page(struct page *page)
{
        mod_node_page_state(page_pgdat(page),
                        NR_ISOLATED_ANON + page_is_file_lru(page),
                        -compound_nr(page));
        unlock_page(page);
        putback_lru_page(page);
}

static void release_pte_pages(pte_t *pte, pte_t *_pte,
                struct list_head *compound_pagelist)
{
        struct page *page, *tmp;

        while (--_pte >= pte) {
                pte_t pteval = *_pte;

                page = pte_page(pteval);
                if (!pte_none(pteval) && !is_zero_pfn(pte_pfn(pteval)) &&
                                !PageCompound(page))
                        release_pte_page(page);
        }

        list_for_each_entry_safe(page, tmp, compound_pagelist, lru) {
                list_del(&page->lru);
                release_pte_page(page);
        }
}

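/*
 * A page is suitable for collapse only if every reference is accounted
 * for by its mappings (plus swapcache references, if any). Any extra
 * reference indicates a GUP or other external pin.
 */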
static bool is_refcount_suitable(struct page *page)
{
        int expected_refcount;

        expected_refcount = total_mapcount(page);
        if (PageSwapCache(page))
                expected_refcount += compound_nr(page);

        return page_count(page) == expected_refcount;
}

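/*
 * Walk the ptes of the candidate range, enforcing the max_ptes_none and
 * max_ptes_shared limits, then lock and isolate every mapped page from
 * the LRU (compound pages are queued on @compound_pagelist). Returns 1
 * when all pages were isolated, 0 after putting everything back. With
 * CONFIG_FINEGRAINED_THP, @hpage_type selects the 64KB or 2MB limits
 * and range size.
 */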
#ifdef CONFIG_FINEGRAINED_THP
static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
                                        unsigned long address,
                                        pte_t *pte,
                                        struct list_head *compound_pagelist,
                                        int hpage_type)
#else /* CONFIG_FINEGRAINED_THP */
static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
                                        unsigned long address,
                                        pte_t *pte,
                                        struct list_head *compound_pagelist)
#endif /* CONFIG_FINEGRAINED_THP */
{
        struct page *page = NULL;
        pte_t *_pte;
        int none_or_zero = 0, shared = 0, result = 0, referenced = 0;
        bool writable = false;
#ifdef CONFIG_FINEGRAINED_THP
        int max_ptes_shared, max_ptes_none;
        int hpage_nr;

        if (hpage_type == THP_TYPE_64KB) {
                hpage_nr = HPAGE_CONT_PTE_NR;
                max_ptes_shared = khugepaged_max_ptes_shared_64kb;
                max_ptes_none = khugepaged_max_ptes_none_64kb;
        } else {
                hpage_nr = HPAGE_PMD_NR;
                max_ptes_shared = khugepaged_max_ptes_shared;
                max_ptes_none = khugepaged_max_ptes_none;
        }
#endif /* CONFIG_FINEGRAINED_THP */

        for (_pte = pte;
#ifdef CONFIG_FINEGRAINED_THP
                _pte < pte + hpage_nr;
#else
                _pte < pte + HPAGE_PMD_NR;
#endif
             _pte++, address += PAGE_SIZE) {
                pte_t pteval = *_pte;
                if (pte_none(pteval) || (pte_present(pteval) &&
                                is_zero_pfn(pte_pfn(pteval)))) {
#ifdef CONFIG_FINEGRAINED_THP
                        if (!userfaultfd_armed(vma) &&
                            ++none_or_zero <= max_ptes_none)
#else /* CONFIG_FINEGRAINED_THP */
                        if (!userfaultfd_armed(vma) &&
                            ++none_or_zero <= khugepaged_max_ptes_none)
#endif /* CONFIG_FINEGRAINED_THP */
                        {
                                continue;
                        } else {
                                result = SCAN_EXCEED_NONE_PTE;
                                goto out;
                        }
                }
                if (!pte_present(pteval)) {
                        result = SCAN_PTE_NON_PRESENT;
                        goto out;
                }
                page = vm_normal_page(vma, address, pteval);
                if (unlikely(!page)) {
                        result = SCAN_PAGE_NULL;
                        goto out;
                }

                VM_BUG_ON_PAGE(!PageAnon(page), page);

#ifdef CONFIG_FINEGRAINED_THP
                if (page_mapcount(page) > 1 &&
                                ++shared > max_ptes_shared)
#else /* CONFIG_FINEGRAINED_THP */
                if (page_mapcount(page) > 1 &&
                                ++shared > khugepaged_max_ptes_shared)
#endif /* CONFIG_FINEGRAINED_THP */
                {
                        result = SCAN_EXCEED_SHARED_PTE;
                        goto out;
                }

                if (PageCompound(page)) {
                        struct page *p;
                        page = compound_head(page);

                        /*
                         * Check if we have dealt with the compound page
                         * already
                         */
                        list_for_each_entry(p, compound_pagelist, lru) {
                                if (page == p)
                                        goto next;
                        }
                }

                /*
                 * We can do it before isolate_lru_page because the
                 * page can't be freed from under us. NOTE: PG_lock
                 * is needed to serialize against split_huge_page
                 * when invoked from the VM.
                 */
                if (!trylock_page(page)) {
                        result = SCAN_PAGE_LOCK;
                        goto out;
                }

                /*
                 * Check if the page has any GUP (or other external) pins.
                 *
                 * The page table that maps the page has already been
                 * unlinked from the page table tree and this process
                 * cannot get an additional pin on the page.
                 *
                 * New pins can come later if the page is shared across fork,
                 * but not from this process. The other process cannot write to
                 * the page, only trigger CoW.
                 */
                if (!is_refcount_suitable(page)) {
                        unlock_page(page);
                        result = SCAN_PAGE_COUNT;
                        goto out;
                }
                if (!pte_write(pteval) && PageSwapCache(page) &&
                                !reuse_swap_page(page, NULL)) {
                        /*
                         * Page is in the swap cache and cannot be re-used.
                         * It cannot be collapsed into a THP.
                         */
                        unlock_page(page);
                        result = SCAN_SWAP_CACHE_PAGE;
                        goto out;
                }

                /*
                 * Isolate the page to avoid collapsing a hugepage
                 * currently in use by the VM.
                 */
                if (isolate_lru_page(page)) {
                        unlock_page(page);
                        result = SCAN_DEL_PAGE_LRU;
                        goto out;
                }
                mod_node_page_state(page_pgdat(page),
                                NR_ISOLATED_ANON + page_is_file_lru(page),
                                compound_nr(page));
                VM_BUG_ON_PAGE(!PageLocked(page), page);
                VM_BUG_ON_PAGE(PageLRU(page), page);

                if (PageCompound(page))
                        list_add_tail(&page->lru, compound_pagelist);
next:
                /* There should be enough young ptes to collapse the page */
                if (pte_young(pteval) ||
                    page_is_young(page) || PageReferenced(page) ||
                    mmu_notifier_test_young(vma->vm_mm, address))
                        referenced++;

                if (pte_write(pteval))
                        writable = true;
        }

        if (unlikely(!writable)) {
                result = SCAN_PAGE_RO;
        } else if (unlikely(!referenced)) {
                result = SCAN_LACK_REFERENCED_PAGE;
        } else {
                result = SCAN_SUCCEED;
                trace_mm_collapse_huge_page_isolate(page, none_or_zero,
                                                    referenced, writable, result);
                return 1;
        }
out:
        release_pte_pages(pte, _pte, compound_pagelist);
        trace_mm_collapse_huge_page_isolate(page, none_or_zero,
                                            referenced, writable, result);
        return 0;
}

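/*
 * Copy the contents of the isolated pages into @page (the new hugepage)
 * and tear down the old mappings: pte_clear() each entry, drop the
 * rmap, and release the source pages. pte_none/zero-pfn entries become
 * cleared (zero-filled) subpages.
 */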
#ifdef CONFIG_FINEGRAINED_THP
static void __collapse_huge_page_copy(pte_t *pte, struct page *page,
                                      struct vm_area_struct *vma,
                                      unsigned long address,
                                      spinlock_t *ptl,
                                      struct list_head *compound_pagelist,
                                      int hpage_type)
#else /* CONFIG_FINEGRAINED_THP */
static void __collapse_huge_page_copy(pte_t *pte, struct page *page,
                                      struct vm_area_struct *vma,
                                      unsigned long address,
                                      spinlock_t *ptl,
                                      struct list_head *compound_pagelist)
#endif /* CONFIG_FINEGRAINED_THP */
{
        struct page *src_page, *tmp;
        pte_t *_pte;
#ifdef CONFIG_FINEGRAINED_THP
        int hpage_nr = (hpage_type == THP_TYPE_64KB ?
                                        HPAGE_CONT_PTE_NR : HPAGE_PMD_NR);
#endif

        for (_pte = pte;
#ifdef CONFIG_FINEGRAINED_THP
                                _pte < pte + hpage_nr;
#else
                                _pte < pte + HPAGE_PMD_NR;
#endif
                                _pte++, page++, address += PAGE_SIZE) {
                pte_t pteval = *_pte;

                if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) {
                        clear_user_highpage(page, address);
                        add_mm_counter(vma->vm_mm, MM_ANONPAGES, 1);
                        if (is_zero_pfn(pte_pfn(pteval))) {
                                /*
                                 * ptl mostly unnecessary.
                                 */
                                spin_lock(ptl);
                                /*
                                 * paravirt calls inside pte_clear here are
                                 * superfluous.
                                 */
                                pte_clear(vma->vm_mm, address, _pte);
                                spin_unlock(ptl);
                        }
                } else {
                        src_page = pte_page(pteval);
                        copy_user_highpage(page, src_page, address, vma);
                        if (!PageCompound(src_page))
                                release_pte_page(src_page);
                        /*
                         * ptl mostly unnecessary, but preempt has to
                         * be disabled to update the per-cpu stats
                         * inside page_remove_rmap().
                         */
                        spin_lock(ptl);
                        /*
                         * paravirt calls inside pte_clear here are
                         * superfluous.
                         */
                        pte_clear(vma->vm_mm, address, _pte);
                        page_remove_rmap(src_page, false);
                        spin_unlock(ptl);
                        free_page_and_swap_cache(src_page);
                }
        }

        list_for_each_entry_safe(src_page, tmp, compound_pagelist, lru) {
                list_del(&src_page->lru);
                release_pte_page(src_page);
        }
}

static void khugepaged_alloc_sleep(void)
{
        DEFINE_WAIT(wait);

        add_wait_queue(&khugepaged_wait, &wait);
        freezable_schedule_timeout_interruptible(
                msecs_to_jiffies(khugepaged_alloc_sleep_millisecs));
        remove_wait_queue(&khugepaged_wait, &wait);
}

static int khugepaged_node_load[MAX_NUMNODES];

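/*
 * With node_reclaim_mode enabled, abort the scan if it would mix pages
 * from nodes farther apart than node_reclaim_distance: a hugepage
 * spanning such nodes would work against the goal of allocating memory
 * locally.
 */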
static bool khugepaged_scan_abort(int nid)
{
        int i;

        /*
         * If node_reclaim_mode is disabled, then no extra effort is made to
         * allocate memory locally.
         */
        if (!node_reclaim_mode)
                return false;

        /* If there is a count for this node already, it must be acceptable */
        if (khugepaged_node_load[nid])
                return false;

        for (i = 0; i < MAX_NUMNODES; i++) {
                if (!khugepaged_node_load[i])
                        continue;
                if (node_distance(nid, i) > node_reclaim_distance)
                        return true;
        }
        return false;
}

/* Defrag for khugepaged will enter direct reclaim/compaction if necessary */
static inline gfp_t alloc_hugepage_khugepaged_gfpmask(void)
{
        return khugepaged_defrag() ? GFP_TRANSHUGE : GFP_TRANSHUGE_LIGHT;
}

#ifdef CONFIG_NUMA
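/*
 * Pick the NUMA node for the new hugepage: the node that owned the most
 * pages during the scan, with a round-robin tie-break across scans so
 * that equally loaded nodes take turns.
 */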
static int khugepaged_find_target_node(void)
{
        static int last_khugepaged_target_node = NUMA_NO_NODE;
        int nid, target_node = 0, max_value = 0;

        /* find first node with max normal pages hit */
        for (nid = 0; nid < MAX_NUMNODES; nid++)
                if (khugepaged_node_load[nid] > max_value) {
                        max_value = khugepaged_node_load[nid];
                        target_node = nid;
                }

        /* do some balance if several nodes have the same hit record */
        if (target_node <= last_khugepaged_target_node)
                for (nid = last_khugepaged_target_node + 1; nid < MAX_NUMNODES;
                                nid++)
                        if (max_value == khugepaged_node_load[nid]) {
                                target_node = nid;
                                break;
                        }

        last_khugepaged_target_node = target_node;
        return target_node;
}

static bool khugepaged_prealloc_page(struct page **hpage, bool *wait)
{
        if (IS_ERR(*hpage)) {
                if (!*wait)
                        return false;

                *wait = false;
                *hpage = NULL;
                khugepaged_alloc_sleep();
        } else if (*hpage) {
                put_page(*hpage);
                *hpage = NULL;
        }

        return true;
}

static struct page *
khugepaged_alloc_page(struct page **hpage, gfp_t gfp, int node)
{
        VM_BUG_ON_PAGE(*hpage, *hpage);

        *hpage = __alloc_pages_node(node, gfp, HPAGE_PMD_ORDER);
        if (unlikely(!*hpage)) {
                count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
                *hpage = ERR_PTR(-ENOMEM);
                return NULL;
        }

        prep_transhuge_page(*hpage);
        count_vm_event(THP_COLLAPSE_ALLOC);
        return *hpage;
}
#else
static int khugepaged_find_target_node(void)
{
        return 0;
}

#ifdef CONFIG_FINEGRAINED_THP
static inline struct page *alloc_khugepaged_hugepage(int hpage_order)
#else
static inline struct page *alloc_khugepaged_hugepage(void)
#endif
{
        struct page *page;

#ifdef CONFIG_FINEGRAINED_THP
        page = alloc_pages(alloc_hugepage_khugepaged_gfpmask(),
                           hpage_order);
#else
        page = alloc_pages(alloc_hugepage_khugepaged_gfpmask(),
                           HPAGE_PMD_ORDER);
#endif
        if (page)
                prep_transhuge_page(page);
        return page;
}

static struct page *khugepaged_alloc_hugepage(bool *wait)
{
        struct page *hpage;

        do {
#ifdef CONFIG_FINEGRAINED_THP
                hpage = alloc_khugepaged_hugepage(HPAGE_PMD_ORDER);
#else
                hpage = alloc_khugepaged_hugepage();
#endif
                if (!hpage) {
                        count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
                        if (!*wait)
                                return NULL;

                        *wait = false;
                        khugepaged_alloc_sleep();
                } else
                        count_vm_event(THP_COLLAPSE_ALLOC);
        } while (unlikely(!hpage) && likely(khugepaged_enabled()));

        return hpage;
}

static bool khugepaged_prealloc_page(struct page **hpage, bool *wait)
{
        /*
         * If the hpage allocated earlier was briefly exposed in page cache
         * before collapse_file() failed, it is possible that racing lookups
         * have not yet completed, and would then be unpleasantly surprised by
         * finding the hpage reused for the same mapping at a different offset.
         * Just release the previous allocation if there is any danger of that.
         */
        if (*hpage && page_count(*hpage) > 1) {
                put_page(*hpage);
                *hpage = NULL;
        }

        if (!*hpage)
                *hpage = khugepaged_alloc_hugepage(wait);

        if (unlikely(!*hpage))
                return false;

        return true;
}

#ifdef CONFIG_FINEGRAINED_THP
static struct page *
khugepaged_alloc_page(struct page **hpage, gfp_t gfp, int node, int hpage_type)
{
        struct page *page;

        if (hpage_type == THP_TYPE_64KB)
                page = alloc_khugepaged_hugepage(HPAGE_CONT_PTE_ORDER);
        else {
                VM_BUG_ON(!*hpage);
                page = *hpage;
        }
        return page;
}
#else /* CONFIG_FINEGRAINED_THP */
static struct page *
khugepaged_alloc_page(struct page **hpage, gfp_t gfp, int node)
{
        VM_BUG_ON(!*hpage);

        return *hpage;
}
#endif /* CONFIG_FINEGRAINED_THP */
#endif

/*
 * If the mmap_lock was temporarily dropped, revalidate the vma after
 * re-taking it. Returns 0 on success; otherwise returns a non-zero
 * scan code.
 */

#ifdef CONFIG_FINEGRAINED_THP
static int hugepage_vma_revalidate(struct mm_struct *mm, unsigned long address,
                struct vm_area_struct **vmap, int hpage_type)
#else
static int hugepage_vma_revalidate(struct mm_struct *mm, unsigned long address,
                struct vm_area_struct **vmap)
#endif
{
        struct vm_area_struct *vma;
        unsigned long hstart, hend;

        if (unlikely(khugepaged_test_exit(mm)))
                return SCAN_ANY_PROCESS;

        *vmap = vma = find_vma(mm, address);
        if (!vma)
                return SCAN_VMA_NULL;

#ifdef CONFIG_FINEGRAINED_THP
        if (hpage_type == THP_TYPE_64KB) {
                hstart = (vma->vm_start + ~HPAGE_CONT_PTE_MASK) & HPAGE_CONT_PTE_MASK;
                hend = vma->vm_end & HPAGE_CONT_PTE_MASK;
                if (address < hstart || address + HPAGE_CONT_PTE_SIZE > hend)
                        return SCAN_ADDRESS_RANGE;
                if (!hugepage_vma_check(vma, vma->vm_flags))
                        return SCAN_VMA_CHECK;
                return 0;
        }
#endif /* CONFIG_FINEGRAINED_THP */
        hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
        hend = vma->vm_end & HPAGE_PMD_MASK;
        if (address < hstart || address + HPAGE_PMD_SIZE > hend)
                return SCAN_ADDRESS_RANGE;
        if (!hugepage_vma_check(vma, vma->vm_flags))
                return SCAN_VMA_CHECK;
        /* Anon VMA expected */
        if (!vma->anon_vma || vma->vm_ops)
                return SCAN_VMA_CHECK;
        return 0;
}

/*
 * Bring missing pages in from swap, to complete THP collapse.
 * Only done if khugepaged_scan_pmd believes it is worthwhile.
 *
 * Called and returns without pte mapped or spinlocks held,
 * but with mmap_lock held to protect against vma changes.
 */

#ifdef CONFIG_FINEGRAINED_THP
static bool __collapse_huge_page_swapin(struct mm_struct *mm,
                                        struct vm_area_struct *vma,
                                        unsigned long address, pmd_t *pmd,
                                        int referenced, int hpage_type)
#else /* CONFIG_FINEGRAINED_THP */
static bool __collapse_huge_page_swapin(struct mm_struct *mm,
                                        struct vm_area_struct *vma,
                                        unsigned long address, pmd_t *pmd,
                                        int referenced)
#endif /* CONFIG_FINEGRAINED_THP */
{
        int swapped_in = 0;
        vm_fault_t ret = 0;
        struct vm_fault vmf = {
                .vma = vma,
                .address = address,
                .flags = FAULT_FLAG_ALLOW_RETRY,
                .pmd = pmd,
                .pgoff = linear_page_index(vma, address),
        };
#ifdef CONFIG_FINEGRAINED_THP
        int hpage_size = (hpage_type == THP_TYPE_64KB) ?
                                                HPAGE_CONT_PTE_SIZE : HPAGE_PMD_SIZE;
#endif

        vmf.pte = pte_offset_map(pmd, address);
        for (;
#ifdef CONFIG_FINEGRAINED_THP
                        vmf.address < address + hpage_size;
#else
                        vmf.address < address + HPAGE_PMD_NR * PAGE_SIZE;
#endif
                        vmf.pte++, vmf.address += PAGE_SIZE) {
                vmf.orig_pte = *vmf.pte;
                if (!is_swap_pte(vmf.orig_pte))
                        continue;
                swapped_in++;
                ret = do_swap_page(&vmf);

                /* do_swap_page returns VM_FAULT_RETRY with released mmap_lock */
                if (ret & VM_FAULT_RETRY) {
                        mmap_read_lock(mm);
#ifdef CONFIG_FINEGRAINED_THP
                        if (hugepage_vma_revalidate(mm, address, &vmf.vma, hpage_type))
#else
                        if (hugepage_vma_revalidate(mm, address, &vmf.vma))
#endif
                        {
                                /* vma is no longer available, don't continue to swapin */
                                trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
                                return false;
                        }
                        /* check if the pmd is still valid */
                        if (mm_find_pmd(mm, address) != pmd) {
                                trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
                                return false;
                        }
                }
                if (ret & VM_FAULT_ERROR) {
                        trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
                        return false;
                }
                /* pte is unmapped now, we need to map it */
                vmf.pte = pte_offset_map(pmd, vmf.address);
        }
        vmf.pte--;
        pte_unmap(vmf.pte);

        /* Drain LRU add pagevec to remove extra pin on the swapped in pages */
        if (swapped_in)
                lru_add_drain();

        trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 1);
        return true;
}

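/*
 * Collapse the range at @address into one hugepage:
 *  1. allocate the new page (mmap_lock is dropped during allocation),
 *  2. revalidate the vma and optionally swap in missing ptes,
 *  3. under mmap_write_lock, unlink the pagetable and flush the TLB,
 *  4. isolate and copy the old pages, then map the new hugepage.
 * With CONFIG_FINEGRAINED_THP, @hpage_type also allows a 64KB collapse
 * using contiguous ptes instead of a pmd.
 */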
1246 #ifdef CONFIG_FINEGRAINED_THP
1247 static void collapse_huge_page(struct mm_struct *mm,
1248                                    unsigned long address,
1249                                    struct page **hpage,
1250                                    int node, int referenced, int unmapped,
1251                                    int hpage_type)
1252 #else /* CONFIG_FINEGRAINED_THP */
1253 static void collapse_huge_page(struct mm_struct *mm,
1254                                    unsigned long address,
1255                                    struct page **hpage,
1256                                    int node, int referenced, int unmapped)
1257 #endif /* CONFIG_FINEGRAINED_THP */
1258 {
1259         LIST_HEAD(compound_pagelist);
1260         pmd_t *pmd, _pmd;
1261         pte_t *pte;
1262         pgtable_t pgtable;
1263         struct page *new_page;
1264         spinlock_t *pmd_ptl, *pte_ptl;
1265         int isolated = 0, result = 0;
1266         struct vm_area_struct *vma;
1267         struct mmu_notifier_range range;
1268         gfp_t gfp;
1269
1270 #ifdef CONFIG_FINEGRAINED_THP
1271         pte_t _pte;
1272
1273         VM_BUG_ON(address & (hpage_type == THP_TYPE_64KB ?
1274                                 ~HPAGE_CONT_PTE_MASK : ~HPAGE_PMD_MASK));
1275 #else
1276         VM_BUG_ON(address & ~HPAGE_PMD_MASK);
1277 #endif
1278
1279         /* Only allocate from the target node */
1280         gfp = alloc_hugepage_khugepaged_gfpmask() | __GFP_THISNODE;
1281
1282         /*
1283          * Before allocating the hugepage, release the mmap_lock read lock.
1284          * The allocation can take potentially a long time if it involves
1285          * sync compaction, and we do not need to hold the mmap_lock during
1286          * that. We will recheck the vma after taking it again in write mode.
1287          */
1288         mmap_read_unlock(mm);
1289 #ifdef CONFIG_FINEGRAINED_THP
1290         new_page = khugepaged_alloc_page(hpage, gfp, node, hpage_type);
1291 #else
1292         new_page = khugepaged_alloc_page(hpage, gfp, node);
1293 #endif
1294         if (!new_page) {
1295                 result = SCAN_ALLOC_HUGE_PAGE_FAIL;
1296                 goto out_nolock;
1297         }
1298
1299         if (unlikely(mem_cgroup_charge(new_page, mm, gfp))) {
1300                 result = SCAN_CGROUP_CHARGE_FAIL;
1301                 goto out_nolock;
1302         }
1303         count_memcg_page_event(new_page, THP_COLLAPSE_ALLOC);
1304
1305         mmap_read_lock(mm);
1306 #ifdef CONFIG_FINEGRAINED_THP
1307         result = hugepage_vma_revalidate(mm, address, &vma, hpage_type);
1308 #else
1309         result = hugepage_vma_revalidate(mm, address, &vma);
1310 #endif
1311         if (result) {
1312                 mmap_read_unlock(mm);
1313                 goto out_nolock;
1314         }
1315
1316         pmd = mm_find_pmd(mm, address);
1317         if (!pmd) {
1318                 result = SCAN_PMD_NULL;
1319                 mmap_read_unlock(mm);
1320                 goto out_nolock;
1321         }
1322
1323         /*
1324          * __collapse_huge_page_swapin always returns with mmap_lock locked.
1325          * If it fails, we release mmap_lock and jump out_nolock.
1326          * Continuing to collapse causes inconsistency.
1327          */
1328 #ifdef CONFIG_FINEGRAINED_THP
1329         if (unmapped && !__collapse_huge_page_swapin(mm, vma, address,
1330                                                      pmd, referenced, hpage_type)) {
1331                 mmap_read_unlock(mm);
1332                 goto out_nolock;
1333         }
1334 #else /* CONFIG_FINEGRAINED_THP */
1335         if (unmapped && !__collapse_huge_page_swapin(mm, vma, address,
1336                                                      pmd, referenced)) {
1337                 mmap_read_unlock(mm);
1338                 goto out_nolock;
1339         }
1340 #endif /* CONFIG_FINEGRAINED_THP*/
1341
1342         mmap_read_unlock(mm);
1343         /*
1344          * Prevent all access to pagetables with the exception of
1345          * gup_fast later handled by the ptep_clear_flush and the VM
1346          * handled by the anon_vma lock + PG_lock.
1347          */
1348         mmap_write_lock(mm);
1349 #ifdef CONFIG_FINEGRAINED_THP
1350         result = hugepage_vma_revalidate(mm, address, &vma, hpage_type);
1351 #else
1352         result = hugepage_vma_revalidate(mm, address, &vma);
1353 #endif
1354         if (result)
1355                 goto out;
1356         /* check if the pmd is still valid */
1357         if (mm_find_pmd(mm, address) != pmd)
1358                 goto out;
1359
1360         anon_vma_lock_write(vma->anon_vma);
1361
1362 #ifdef CONFIG_FINEGRAINED_THP
1363         mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, NULL, mm,
1364                                 address, address + (hpage_type == THP_TYPE_64KB ?
1365                                 HPAGE_CONT_PTE_SIZE : HPAGE_PMD_SIZE));
1366 #else
1367         mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, NULL, mm,
1368                                 address, address + HPAGE_PMD_SIZE);
1369 #endif
1370         mmu_notifier_invalidate_range_start(&range);
1371
1372         pte = pte_offset_map(pmd, address);
1373         pte_ptl = pte_lockptr(mm, pmd);
1374
1375         pmd_ptl = pmd_lock(mm, pmd); /* probably unnecessary */
1376         /*
1377          * After this gup_fast can't run anymore. This also removes
1378          * any huge TLB entry from the CPU so we won't allow
1379          * huge and small TLB entries for the same virtual address
1380          * to avoid the risk of CPU bugs in that area.
1381          */
1382 #ifdef CONFIG_FINEGRAINED_THP
1383         if (hpage_type == THP_TYPE_64KB)
1384                 /* FIXME: clearing ptes here causes
1385                  * __collapse_huge_page_isolate and __collapse_huge_page_copy
1386                  * to fail, __collapse_huge_page_copy also clears ptes
1387                  */
1388                 flush_tlb_range(vma, address, address + HPAGE_CONT_PTE_SIZE);
1389         else
1390 #endif /* CONFIG_FINEGRAINED_THP */
1391                 _pmd = pmdp_collapse_flush(vma, address, pmd);
1392         spin_unlock(pmd_ptl);
1393         mmu_notifier_invalidate_range_end(&range);
1394
1395         spin_lock(pte_ptl);
1396 #ifdef CONFIG_FINEGRAINED_THP
1397         isolated = __collapse_huge_page_isolate(vma, address, pte,
1398                         &compound_pagelist, hpage_type);
1399 #else /* CONFIG_FINEGRAINED_THP */
1400         isolated = __collapse_huge_page_isolate(vma, address, pte,
1401                         &compound_pagelist);
1402 #endif /* CONFIG_FINEGRAINED_THP */
1403         spin_unlock(pte_ptl);
1404
1405         if (unlikely(!isolated)) {
1406 #ifdef CONFIG_FINEGRAINED_THP
1407                 if (hpage_type == THP_TYPE_64KB) {
1408                         pte_unmap(pte);
1409                         anon_vma_unlock_write(vma->anon_vma);
1410                         result = SCAN_FAIL;
1411                         goto out;
1412                 }
1413 #endif /* CONFIG_FINEGRAINED_THP */
1414                 pte_unmap(pte);
1415                 spin_lock(pmd_ptl);
1416                 BUG_ON(!pmd_none(*pmd));
1417                 /*
1418                  * We can only use set_pmd_at when establishing
1419                  * hugepmds and never for establishing regular pmds that
1420                  * points to regular pagetables. Use pmd_populate for that
1421                  */
1422                 pmd_populate(mm, pmd, pmd_pgtable(_pmd));
1423                 spin_unlock(pmd_ptl);
1424                 anon_vma_unlock_write(vma->anon_vma);
1425                 result = SCAN_FAIL;
1426                 goto out;
1427         }
1428
1429         /*
1430          * All pages are isolated and locked so anon_vma rmap
1431          * can't run anymore.
1432          */
1433         anon_vma_unlock_write(vma->anon_vma);
1434
1435 #ifdef CONFIG_FINEGRAINED_THP
1436         __collapse_huge_page_copy(pte, new_page, vma, address, pte_ptl,
1437                         &compound_pagelist, hpage_type);
1438 #else /* CONFIG_FINEGRAINED_THP */
1439         __collapse_huge_page_copy(pte, new_page, vma, address, pte_ptl,
1440                         &compound_pagelist);
1441 #endif /* CONFIG_FINEGRAINED_THP */
1442         pte_unmap(pte);
1443         __SetPageUptodate(new_page);
1444
1445 #ifdef CONFIG_FINEGRAINED_THP
1446         if (hpage_type == THP_TYPE_64KB) {
1447                 /* 64KB hugepage */
1448                 _pte = arch_make_huge_pte(new_page, vma);
1449                 _pte = maybe_mkwrite(pte_mkdirty(_pte), vma);
1450         } else {
1451                 /* 2MB hugepage */
1452                 pgtable = pmd_pgtable(_pmd);
1453
1454                 _pmd = mk_huge_pmd(new_page, vma->vm_page_prot);
1455                 _pmd = maybe_pmd_mkwrite(pmd_mkdirty(_pmd), vma);
1456         }
1457 #else /* CONFIG_FINEGRAINED_THP */
1458         pgtable = pmd_pgtable(_pmd);
1459
1460         _pmd = mk_huge_pmd(new_page, vma->vm_page_prot);
1461         _pmd = maybe_pmd_mkwrite(pmd_mkdirty(_pmd), vma);
1462 #endif /* CONFIG_FINEGRAINED_THP */
1463         /*
1464          * spin_lock() below is not the equivalent of smp_wmb(), so
1465          * this is needed to prevent the copy_huge_page writes from
1466          * becoming visible after the set_pmd_at() write.
1467          */
1468         smp_wmb();
1469
1470         spin_lock(pmd_ptl);
1471 #ifdef CONFIG_FINEGRAINED_THP
1472         if (hpage_type == THP_TYPE_2MB)
1473 #endif
1474                 BUG_ON(!pmd_none(*pmd));
1475         page_add_new_anon_rmap(new_page, vma, address, true);
1476         lru_cache_add_inactive_or_unevictable(new_page, vma);
1477
1478 #ifdef CONFIG_FINEGRAINED_THP
1479         if (hpage_type == THP_TYPE_64KB)
1480                 arch_set_huge_pte_at(mm, address, pte, _pte, 0);
1481         else {
1482                 pgtable_trans_huge_deposit(mm, pmd, pgtable);
1483                 set_pmd_at(mm, address, pmd, _pmd);
1484         }
1485         update_mmu_cache_pmd(vma, address, pmd);
1486 #else /* CONFIG_FINEGRAINED_THP */
1487         pgtable_trans_huge_deposit(mm, pmd, pgtable);
1488         set_pmd_at(mm, address, pmd, _pmd);
1489         update_mmu_cache_pmd(vma, address, pmd);
1490 #endif /* CONFIG_FINEGRAINED_THP */
1491         spin_unlock(pmd_ptl);
1492
1493 #ifdef CONFIG_FINEGRAINED_THP
1494         if (hpage_type == THP_TYPE_2MB)
1495 #endif
1496                 *hpage = NULL;
1497
1498         khugepaged_pages_collapsed++;
1499         result = SCAN_SUCCEED;
1500 out_up_write:
1501         mmap_write_unlock(mm);
1502 out_nolock:
1503         if (!IS_ERR_OR_NULL(*hpage))
1504                 mem_cgroup_uncharge(*hpage);
1505 #ifdef CONFIG_FINEGRAINED_THP
1506         if (result != SCAN_SUCCEED && new_page && hpage_type == THP_TYPE_64KB)
1507                 put_page(new_page);
1508 #endif
1509         trace_mm_collapse_huge_page(mm, isolated, result);
1510         return;
1511 out:
1512         goto out_up_write;
1513 }
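/*
 * Editorial note (a summary sketch of collapse_huge_page() above, not
 * additional kernel code): the huge mapping must only be published after
 * the copied data is visible, because a lockless walker such as gup_fast
 * can observe the new PMD without taking pmd_ptl:
 *
 *	__collapse_huge_page_copy(...);		// plain stores into new_page
 *	smp_wmb();				// order the copy before the publish
 *	set_pmd_at(mm, address, pmd, _pmd);	// publish the huge mapping
 *
 * On the failure path the original page table is re-attached with
 * pmd_populate() instead, since set_pmd_at() is reserved for huge PMDs.
 */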
1514
1515 #ifdef CONFIG_FINEGRAINED_THP
1516 static int khugepaged_scan_pmd(struct mm_struct *mm,
1517                                struct vm_area_struct *vma,
1518                                unsigned long address,
1519                                struct page **hpage, int hpage_type)
1520 #else /* CONFIG_FINEGRAINED_THP */
1521 static int khugepaged_scan_pmd(struct mm_struct *mm,
1522                                struct vm_area_struct *vma,
1523                                unsigned long address,
1524                                struct page **hpage)
1525 #endif /* CONFIG_FINEGRAINED_THP */
1526 {
1527         pmd_t *pmd;
1528         pte_t *pte, *_pte;
1529         int ret = 0, result = 0, referenced = 0;
1530         int none_or_zero = 0, shared = 0;
1531         struct page *page = NULL;
1532         unsigned long _address;
1533         spinlock_t *ptl;
1534         int node = NUMA_NO_NODE, unmapped = 0;
1535         bool writable = false;
1536
1537 #ifdef CONFIG_FINEGRAINED_THP
1538         int hpage_nr;
1539         int max_ptes_swap, max_ptes_none, max_ptes_shared;
1540
1541         if (hpage_type == THP_TYPE_64KB) {
1542                 VM_BUG_ON(address & ~HPAGE_CONT_PTE_MASK);
1543                 hpage_nr = HPAGE_CONT_PTE_NR;
1544                 max_ptes_swap = khugepaged_max_ptes_swap_64kb;
1545                 max_ptes_none = khugepaged_max_ptes_none_64kb;
1546                 max_ptes_shared = khugepaged_max_ptes_shared_64kb;
1547         } else {
1548                 VM_BUG_ON(address & ~HPAGE_PMD_MASK);
1549                 hpage_nr = HPAGE_PMD_NR;
1550                 max_ptes_swap = khugepaged_max_ptes_swap;
1551                 max_ptes_none = khugepaged_max_ptes_none;
1552                 max_ptes_shared = khugepaged_max_ptes_shared;
1553         }
1554 #else /* CONFIG_FINEGRAINED_THP */
1555         VM_BUG_ON(address & ~HPAGE_PMD_MASK);
1556 #endif /* CONFIG_FINEGRAINED_THP */
1557
1558         pmd = mm_find_pmd(mm, address);
1559         if (!pmd) {
1560                 result = SCAN_PMD_NULL;
1561                 goto out;
1562         }
1563
1564         memset(khugepaged_node_load, 0, sizeof(khugepaged_node_load));
1565         pte = pte_offset_map_lock(mm, pmd, address, &ptl);
1566         for (_address = address, _pte = pte;
1567 #ifdef CONFIG_FINEGRAINED_THP
1568                 _pte < pte + hpage_nr;
1569 #else
1570                 _pte < pte+HPAGE_PMD_NR;
1571 #endif
1572              _pte++, _address += PAGE_SIZE) {
1573                 pte_t pteval = *_pte;
1574                 if (is_swap_pte(pteval)) {
1575 #ifdef CONFIG_FINEGRAINED_THP
1576                         if (++unmapped <= max_ptes_swap)
1577 #else
1578                         if (++unmapped <= khugepaged_max_ptes_swap)
1579 #endif
1580                         {
1581                                 /*
1582                                  * Always be strict with uffd-wp
1583                                  * enabled swap entries.  Please see
1584                                  * comment below for pte_uffd_wp().
1585                                  */
1586                                 if (pte_swp_uffd_wp(pteval)) {
1587                                         result = SCAN_PTE_UFFD_WP;
1588                                         goto out_unmap;
1589                                 }
1590                                 continue;
1591                         } else {
1592                                 result = SCAN_EXCEED_SWAP_PTE;
1593                                 goto out_unmap;
1594                         }
1595                 }
1596                 if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) {
1597                         if (!userfaultfd_armed(vma) &&
1598 #ifdef CONFIG_FINEGRAINED_THP
1599                             ++none_or_zero <= max_ptes_none
1600 #else
1601                             ++none_or_zero <= khugepaged_max_ptes_none
1602 #endif
1603                         )
1604                         {
1605                                 continue;
1606                         } else {
1607                                 result = SCAN_EXCEED_NONE_PTE;
1608                                 goto out_unmap;
1609                         }
1610                 }
1611                 if (!pte_present(pteval)) {
1612                         result = SCAN_PTE_NON_PRESENT;
1613                         goto out_unmap;
1614                 }
1615                 if (pte_uffd_wp(pteval)) {
1616                         /*
1617                          * Don't collapse the page if any of the small
1618                          * PTEs are armed with uffd write protection.
1619                          * Here we could also mark the new huge pmd as
1620                          * write protected if any of the small ones is
1621                          * marked, but that could bring unknown
1622                          * userfault messages that fall outside of
1623                          * the registered range.  So, just keep it simple.
1624                          */
1625                         result = SCAN_PTE_UFFD_WP;
1626                         goto out_unmap;
1627                 }
1628                 if (pte_write(pteval))
1629                         writable = true;
1630
1631                 page = vm_normal_page(vma, _address, pteval);
1632                 if (unlikely(!page)) {
1633                         result = SCAN_PAGE_NULL;
1634                         goto out_unmap;
1635                 }
1636
1637 #ifdef CONFIG_FINEGRAINED_THP
1638                 if (PageCompound(page) && PageTransHuge(compound_head(page))) {
1639                         result = SCAN_PAGE_COMPOUND;
1640                         goto out_unmap;
1641                 }
1642
1643                 if (page_mapcount(page) > 1 &&
1644                                 ++shared > max_ptes_shared)
1645 #else
1646                 if (page_mapcount(page) > 1 &&
1647                                 ++shared > khugepaged_max_ptes_shared)
1648 #endif
1649                 {
1650                         result = SCAN_EXCEED_SHARED_PTE;
1651                         goto out_unmap;
1652                 }
1653
1654                 page = compound_head(page);
1655
1656                 /*
1657                  * Record which node the original page is from and save this
1658                  * information to khugepaged_node_load[].
1659                  * Khugepaged will allocate the hugepage from the node that
1660                  * has the max hit record.
1661                  */
1662                 node = page_to_nid(page);
1663                 if (khugepaged_scan_abort(node)) {
1664                         result = SCAN_SCAN_ABORT;
1665                         goto out_unmap;
1666                 }
1667                 khugepaged_node_load[node]++;
1668                 if (!PageLRU(page)) {
1669                         result = SCAN_PAGE_LRU;
1670                         goto out_unmap;
1671                 }
1672                 if (PageLocked(page)) {
1673                         result = SCAN_PAGE_LOCK;
1674                         goto out_unmap;
1675                 }
1676                 if (!PageAnon(page)) {
1677                         result = SCAN_PAGE_ANON;
1678                         goto out_unmap;
1679                 }
1680
1681                 /*
1682                  * Check if the page has any GUP (or other external) pins.
1683                  *
1684                  * The check is racy: it may see total_mapcount > refcount
1685                  * in some cases.
1686                  * For example, one process with one forked child process.
1687                  * The parent has the PMD split due to MADV_DONTNEED, then
1688                  * the child is trying to unmap the whole PMD, but khugepaged
1689                  * may be scanning the parent between the child clearing the
1690                  * PageDoubleMap flag and decrementing the mapcount.  So
1691                  * khugepaged may see total_mapcount > refcount.
1692                  *
1693                  * But such a case is ephemeral; we can always retry the
1694                  * collapse later.  However, it may report a false positive if
1695                  * the page has excessive GUP pins (i.e. 512).  Anyway, the
1696                  * same check will be done again later, so the risk seems low.
1697                  */
1698                 if (!is_refcount_suitable(page)) {
1699                         result = SCAN_PAGE_COUNT;
1700                         goto out_unmap;
1701                 }
1702                 if (pte_young(pteval) ||
1703                     page_is_young(page) || PageReferenced(page) ||
1704                     mmu_notifier_test_young(vma->vm_mm, address))
1705                         referenced++;
1706         }
1707         if (!writable) {
1708                 result = SCAN_PAGE_RO;
1709         } else if (!referenced || (unmapped && referenced < HPAGE_PMD_NR/2)) {
1710                 result = SCAN_LACK_REFERENCED_PAGE;
1711         } else {
1712                 result = SCAN_SUCCEED;
1713                 ret = 1;
1714         }
1715 out_unmap:
1716         pte_unmap_unlock(pte, ptl);
1717         if (ret) {
1718                 node = khugepaged_find_target_node();
1719                 /* collapse_huge_page will return with the mmap_lock released */
1720 #ifdef CONFIG_FINEGRAINED_THP
1721                 collapse_huge_page(mm, address, hpage, node,
1722                                 referenced, unmapped, hpage_type);
1723 #else
1724                 collapse_huge_page(mm, address, hpage, node,
1725                                 referenced, unmapped);
1726 #endif
1727         }
1728 out:
1729         trace_mm_khugepaged_scan_pmd(mm, page, writable, referenced,
1730                                      none_or_zero, result, unmapped);
1731         return ret;
1732 }
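/*
 * Hedged sketch: the acceptance test applied at the end of
 * khugepaged_scan_pmd() above, factored into a hypothetical helper for
 * clarity (this helper is illustrative only and not used elsewhere in
 * the file). A range is only collapsed when it is writable and recently
 * referenced; if PTEs were swapped out, at least half of the range must
 * have been referenced.
 */
static inline bool __maybe_unused scan_pmd_acceptable(bool writable,
						      int referenced,
						      int unmapped)
{
	if (!writable)
		return false;		/* SCAN_PAGE_RO */
	if (!referenced || (unmapped && referenced < HPAGE_PMD_NR / 2))
		return false;		/* SCAN_LACK_REFERENCED_PAGE */
	return true;
}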
1733
1734 static void collect_mm_slot(struct mm_slot *mm_slot)
1735 {
1736         struct mm_struct *mm = mm_slot->mm;
1737
1738         lockdep_assert_held(&khugepaged_mm_lock);
1739
1740         if (khugepaged_test_exit(mm)) {
1741 #ifdef CONFIG_FINEGRAINED_THP
1742                 clear_hint_list(mm_slot);
1743 #endif
1744                 /* free mm_slot */
1745                 hash_del(&mm_slot->hash);
1746                 list_del(&mm_slot->mm_node);
1747
1748                 /*
1749                  * Not strictly needed because the mm exited already.
1750                  *
1751                  * clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
1752                  */
1753
1754                 /* khugepaged_mm_lock actually not necessary for the below */
1755                 free_mm_slot(mm_slot);
1756                 mmdrop(mm);
1757         }
1758 }
1759
1760 #ifdef CONFIG_SHMEM
1761 /*
1762  * Notify khugepaged that the given addr of the mm is a pte-mapped THP.
1763  * Then khugepaged should try to collapse the page table.
1764  */
1765 #ifdef CONFIG_FINEGRAINED_THP
1766 static int khugepaged_add_pte_mapped_thp(struct mm_struct *mm,
1767                                          unsigned long addr, int hpage_type)
1768 #else
1769 static int khugepaged_add_pte_mapped_thp(struct mm_struct *mm,
1770                                          unsigned long addr)
1771 #endif
1772 {
1773         struct mm_slot *mm_slot;
1774
1775 #ifdef CONFIG_FINEGRAINED_THP
1776         VM_BUG_ON(addr & (hpage_type == THP_TYPE_64KB ?
1777                           ~HPAGE_CONT_PTE_MASK : ~HPAGE_PMD_MASK));
1778 #else
1779         VM_BUG_ON(addr & ~HPAGE_PMD_MASK);
1780 #endif
1781
1782         spin_lock(&khugepaged_mm_lock);
1783         mm_slot = get_mm_slot(mm);
1784 #ifdef CONFIG_FINEGRAINED_THP
1785         if (hpage_type == THP_TYPE_64KB)
1786                 addr |= 0x01;
1787 #endif
1788         if (likely(mm_slot && mm_slot->nr_pte_mapped_thp < MAX_PTE_MAPPED_THP))
1789                 mm_slot->pte_mapped_thp[mm_slot->nr_pte_mapped_thp++] = addr;
1790         spin_unlock(&khugepaged_mm_lock);
1791         return 0;
1792 }
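/*
 * Hedged sketch: under CONFIG_FINEGRAINED_THP, the function above borrows
 * bit 0 of the saved address to mark 64KB entries; the address itself is
 * recoverable because both hugepage sizes are at least 64KB-aligned. A
 * decoder matching the logic in collapse_pte_mapped_thp() would look like
 * this hypothetical helper (illustrative only):
 */
#ifdef CONFIG_FINEGRAINED_THP
static inline int __maybe_unused pte_mapped_thp_type(unsigned long addr)
{
	/* bit 0 set: recorded for a 64KB hugepage; clear: a 2MB one */
	return (addr & 0x01) ? THP_TYPE_64KB : THP_TYPE_2MB;
}
#endif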
1793
1794 /**
1795  * Try to collapse a pte-mapped THP for mm at address haddr.
1796  *
1797  * This function checks whether all the PTEs in the PMD are pointing to the
1798  * right THP. If so, retract the page table so the THP can refault in
1799  * as pmd-mapped.
1800  */
1801 void collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr)
1802 {
1803         unsigned long haddr = addr & HPAGE_PMD_MASK;
1804         struct vm_area_struct *vma = find_vma(mm, haddr);
1805         struct page *hpage;
1806         pte_t *start_pte, *pte;
1807         pmd_t *pmd, _pmd;
1808         spinlock_t *ptl;
1809         int count = 0;
1810         int i;
1811 #ifdef CONFIG_FINEGRAINED_THP
1812         int hpage_type = (addr & 0x01) ? THP_TYPE_64KB : THP_TYPE_2MB;
1813         int hpage_nr = (hpage_type == THP_TYPE_64KB) ?
1814                         HPAGE_CONT_PTE_NR : HPAGE_PMD_NR;
1815         int hpage_size = (hpage_type == THP_TYPE_64KB) ?
1816                         HPAGE_CONT_PTE_SIZE : HPAGE_PMD_SIZE;
1817
1818         if (hpage_type == THP_TYPE_64KB)
1819                 haddr = addr & HPAGE_CONT_PTE_MASK;
1820 #endif
1821
1822 #ifdef CONFIG_FINEGRAINED_THP
1823         if (!vma || !vma->vm_file ||
1824             vma->vm_start > haddr || vma->vm_end < haddr + hpage_size)
1825                 return;
1826 #else /* CONFIG_FINEGRAINED_THP */
1827         if (!vma || !vma->vm_file ||
1828             vma->vm_start > haddr || vma->vm_end < haddr + HPAGE_PMD_SIZE)
1829                 return;
1830 #endif /* CONFIG_FINEGRAINED_THP */
1831
1832         /*
1833          * This vm_flags may not have VM_HUGEPAGE if the page was not
1834          * collapsed by this mm. But we can still collapse if the page is
1835          * a valid THP. Add extra VM_HUGEPAGE so hugepage_vma_check()
1836          * will not fail the vma for missing VM_HUGEPAGE.
1837          */
1838         if (!hugepage_vma_check(vma, vma->vm_flags | VM_HUGEPAGE))
1839                 return;
1840
1841         hpage = find_lock_page(vma->vm_file->f_mapping,
1842                                linear_page_index(vma, haddr));
1843         if (!hpage)
1844                 return;
1845
1846         if (!PageHead(hpage))
1847                 goto drop_hpage;
1848
1849         pmd = mm_find_pmd(mm, haddr);
1850         if (!pmd)
1851                 goto drop_hpage;
1852
1853         start_pte = pte_offset_map_lock(mm, pmd, haddr, &ptl);
1854 #ifdef CONFIG_FINEGRAINED_THP
1855         if (pte_cont(*start_pte)) {
1856                 pte_unmap_unlock(start_pte, ptl);
1857                 goto drop_hpage;
1858         }
1859 #endif
1860
1861         /* step 1: check all mapped PTEs are to the right huge page */
1862         for (i = 0, addr = haddr, pte = start_pte;
1863 #ifdef CONFIG_FINEGRAINED_THP
1864              i < hpage_nr;
1865 #else
1866              i < HPAGE_PMD_NR;
1867 #endif
1868              i++, addr += PAGE_SIZE, pte++) {
1869                 struct page *page;
1870
1871                 /* empty pte, skip */
1872                 if (pte_none(*pte))
1873                         continue;
1874
1875                 /* page swapped out, abort */
1876                 if (!pte_present(*pte))
1877                         goto abort;
1878
1879                 page = vm_normal_page(vma, addr, *pte);
1880
1881                 /*
1882                  * Note that uprobe, debugger, or MAP_PRIVATE may change the
1883                  * page table, but the new page will not be a subpage of hpage.
1884                  */
1885                 if (hpage + i != page)
1886                         goto abort;
1887                 count++;
1888         }
1889
1890         /* step 2: adjust rmap */
1891         for (i = 0, addr = haddr, pte = start_pte;
1892 #ifdef CONFIG_FINEGRAINED_THP
1893                 i < hpage_nr;
1894 #else
1895             i < HPAGE_PMD_NR;
1896 #endif
1897              i++, addr += PAGE_SIZE, pte++) {
1898                 struct page *page;
1899
1900                 if (pte_none(*pte))
1901                         continue;
1902                 page = vm_normal_page(vma, addr, *pte);
1903                 page_remove_rmap(page, false);
1904         }
1905
1906         pte_unmap_unlock(start_pte, ptl);
1907
1908         /* step 3: set proper refcount and mm_counters. */
1909         if (count) {
1910                 page_ref_sub(hpage, count);
1911                 add_mm_counter(vma->vm_mm, mm_counter_file(hpage), -count);
1912         }
1913
1914         /* step 4: collapse pmd */
1915         ptl = pmd_lock(vma->vm_mm, pmd);
1916 #ifdef CONFIG_FINEGRAINED_THP
1917         if (hpage_type == THP_TYPE_64KB) {
1918                 pte_t *ptep = pte_offset_map(pmd, haddr);
1919                 arch_clear_huge_pte_range(vma->vm_mm, haddr, ptep);
1920                 spin_unlock(ptl);
1921         } else {
1922                 _pmd = pmdp_collapse_flush(vma, haddr, pmd);
1923                 spin_unlock(ptl);
1924                 mm_dec_nr_ptes(mm);
1925                 pte_free(mm, pmd_pgtable(_pmd));
1926         }
1927 #else /* CONFIG_FINEGRAINED_THP*/
1928         _pmd = pmdp_collapse_flush(vma, haddr, pmd);
1929         spin_unlock(ptl);
1930         mm_dec_nr_ptes(mm);
1931         pte_free(mm, pmd_pgtable(_pmd));
1932 #endif /* CONFIG_FINEGRAINED_THP */
1933
1934 drop_hpage:
1935         unlock_page(hpage);
1936         put_page(hpage);
1937         return;
1938
1939 abort:
1940         pte_unmap_unlock(start_pte, ptl);
1941         goto drop_hpage;
1942 }
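/*
 * Worked example (editorial): for a fully mapped 2MB range, step 1 above
 * ends with count == HPAGE_PMD_NR (512). Step 2 then removes 512 rmap
 * entries, step 3 drops 512 page references and subtracts 512 from the
 * file rss counter, and step 4 detaches and frees the now-redundant PTE
 * page table so the THP can refault as a single PMD mapping. In the 64KB
 * case the same walk covers HPAGE_CONT_PTE_NR PTEs and the page table
 * stays in place; only the contiguous-PTE entries are cleared.
 */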
1943
1944 static int khugepaged_collapse_pte_mapped_thps(struct mm_slot *mm_slot)
1945 {
1946         struct mm_struct *mm = mm_slot->mm;
1947         int i;
1948
1949         if (likely(mm_slot->nr_pte_mapped_thp == 0))
1950                 return 0;
1951
1952         if (!mmap_write_trylock(mm))
1953                 return -EBUSY;
1954
1955         if (unlikely(khugepaged_test_exit(mm)))
1956                 goto out;
1957
1958         for (i = 0; i < mm_slot->nr_pte_mapped_thp; i++)
1959                 collapse_pte_mapped_thp(mm, mm_slot->pte_mapped_thp[i]);
1960
1961 out:
1962         mm_slot->nr_pte_mapped_thp = 0;
1963         mmap_write_unlock(mm);
1964         return 0;
1965 }
1966
1967 #ifdef CONFIG_FINEGRAINED_THP
1968 static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff,
1969                                                         int hpage_type)
1970 #else
1971 static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff)
1972 #endif
1973 {
1974         struct vm_area_struct *vma;
1975         struct mm_struct *mm;
1976         unsigned long addr;
1977         pmd_t *pmd, _pmd;
1978 #ifdef CONFIG_FINEGRAINED_THP
1979         pte_t *ptep;
1980         int hpage_size = (hpage_type == THP_TYPE_64KB) ?
1981                                 HPAGE_CONT_PTE_SIZE : HPAGE_PMD_SIZE;
1982 #endif /* CONFIG_FINEGRAINED_THP */
1983
1984         i_mmap_lock_write(mapping);
1985         vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
1986                 /*
1987                  * Check vma->anon_vma to exclude MAP_PRIVATE mappings that
1988                  * got written to. These VMAs are likely not worth the cost of
1989                  * taking mmap_write_lock(mm), as their PMD mapping is likely
1990                  * to be split later anyway.
1991                  *
1992                  * Note that the vma->anon_vma check is racy: it can be set up
1993                  * after the check but before we take mmap_lock in the fault
1994                  * path. But the page lock would prevent establishing any new
1995                  * ptes of the page, so we are safe.
1996                  *
1997                  * An alternative would be to drop the check, but instead check
1998                  * that the page table is clear before calling
1999                  * pmdp_collapse_flush() under ptl. That has a higher chance of
2000                  * recovering a THP for the VMA, but also a higher cost.
2001                  */
2002                 if (vma->anon_vma)
2003                         continue;
2004                 addr = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
2005 #ifdef CONFIG_FINEGRAINED_THP
2006                 if (hpage_type == THP_TYPE_64KB && addr & ~HPAGE_CONT_PTE_MASK)
2007                         continue;
2008                 else if (hpage_type == THP_TYPE_2MB && addr & ~HPAGE_PMD_MASK)
2009                         continue;
2010                 if (vma->vm_end < addr + hpage_size)
2011                         continue;
2012
2013                 mm = vma->vm_mm;
2014                 pmd = mm_find_pmd(mm, addr);
2015                 if (!pmd)
2016                         continue;
2017                 if (mmap_write_trylock(mm)) {
2018                         spinlock_t *ptl = pmd_lock(mm, pmd);
2019                         if (hpage_type == THP_TYPE_64KB) {
2020                                 /* 64KB hugepage */
2021                                 ptep = pte_offset_map(pmd, addr);
2022                                 /* pte mappings are re-established during page fault handling */
2023                                 arch_clear_huge_pte_range(mm, addr, ptep);
2024                                 spin_unlock(ptl);
2025                         } else {
2026                                 /* 2MB hugepage */
2027                                 /*
2028                                  * We need exclusive mmap_lock to retract the page table.
2029                                  *
2030                                  * We use trylock due to lock inversion: we need to acquire
2031                                  * mmap_lock while holding the page lock. The fault path does
2032                                  * it in reverse order. Trylock is a way to avoid deadlock.
2033                                  */
2034                                 _pmd = pmdp_collapse_flush(vma, addr, pmd);
2035                                 spin_unlock(ptl);
2036
2037                                 mm_dec_nr_ptes(mm);
2038                                 pte_free(mm, pmd_pgtable(_pmd));
2039                         }
2040                         mmap_write_unlock(mm);
2041                 } else
2042                         khugepaged_add_pte_mapped_thp(vma->vm_mm, addr, hpage_type);
2043 #else /* CONFIG_FINEGRAINED_THP */
2044                 if (addr & ~HPAGE_PMD_MASK)
2045                         continue;
2046                 if (vma->vm_end < addr + HPAGE_PMD_SIZE)
2047                         continue;
2048                 mm = vma->vm_mm;
2049                 pmd = mm_find_pmd(mm, addr);
2050                 if (!pmd)
2051                         continue;
2052                 /*
2053                  * We need exclusive mmap_lock to retract page table.
2054                  *
2055                  * We use trylock due to lock inversion: we need to acquire
2056                  * mmap_lock while holding page lock. Fault path does it in
2057                  * reverse order. Trylock is a way to avoid deadlock.
2058                  */
2059                 if (mmap_write_trylock(mm)) {
2060                         if (!khugepaged_test_exit(mm)) {
2061                                 spinlock_t *ptl = pmd_lock(mm, pmd);
2062                                 /* assume page table is clear */
2063                                 _pmd = pmdp_collapse_flush(vma, addr, pmd);
2064                                 spin_unlock(ptl);
2065                                 mm_dec_nr_ptes(mm);
2066                                 pte_free(mm, pmd_pgtable(_pmd));
2067                         }
2068                         mmap_write_unlock(mm);
2069                 } else {
2070                         /* Try again later */
2071                         khugepaged_add_pte_mapped_thp(mm, addr);
2072                 }
2073 #endif /* CONFIG_FINEGRAINED_THP */
2074         }
2075         i_mmap_unlock_write(mapping);
2076 }
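/*
 * Editorial note on the trylock pattern above, condensed into pseudo-form
 * (not additional kernel code):
 *
 *	if (mmap_write_trylock(mm)) {
 *		retract the range under pmd_lock;
 *		mmap_write_unlock(mm);
 *	} else {
 *		khugepaged_add_pte_mapped_thp(mm, addr);   // retry later
 *	}
 *
 * The fault path takes the page lock while already holding mmap_lock;
 * here the page lock is held first, so blocking on mmap_lock could
 * deadlock. Trylock plus deferral to khugepaged breaks the inversion.
 */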
2077
2078 /**
2079  * collapse_file - collapse filemap/tmpfs/shmem pages into a huge one.
2080  *
2081  * Basic scheme is simple, details are more complex:
2082  *  - allocate and lock a new huge page;
2083  *  - scan page cache replacing old pages with the new one
2084  *    + swap/gup in pages if necessary;
2085  *    + fill in gaps;
2086  *    + keep old pages around in case rollback is required;
2087  *  - if replacing succeeds:
2088  *    + copy data over;
2089  *    + free old pages;
2090  *    + unlock huge page;
2091  *  - if replacing failed:
2092  *    + put all pages back and unfreeze them;
2093  *    + restore gaps in the page cache;
2094  *    + unlock and free huge page;
2095  */
2096 #ifdef CONFIG_FINEGRAINED_THP
2097 static void collapse_file(struct mm_struct *mm,
2098                 struct file *file, pgoff_t start,
2099                 struct page **hpage, int node, int hpage_type)
2100 #else /* CONFIG_FINEGRAINED_THP */
2101 static void collapse_file(struct mm_struct *mm,
2102                 struct file *file, pgoff_t start,
2103                 struct page **hpage, int node)
2104 #endif /* CONFIG_FINEGRAINED_THP */
2105 {
2106         struct address_space *mapping = file->f_mapping;
2107         gfp_t gfp;
2108         struct page *new_page;
2109 #ifdef CONFIG_FINEGRAINED_THP
2110         int hpage_nr = (hpage_type == THP_TYPE_64KB ?
2111                                         HPAGE_CONT_PTE_NR : HPAGE_PMD_NR);
2112         int hpage_order = (hpage_type == THP_TYPE_64KB ?
2113                                         HPAGE_CONT_PTE_ORDER : HPAGE_PMD_ORDER);
2114         pgoff_t index, end = start + hpage_nr;
2115 #else /* CONFIG_FINEGRAINED_THP */
2116         pgoff_t index, end = start + HPAGE_PMD_NR;
2117 #endif /* CONFIG_FINEGRAINED_THP */
2118         LIST_HEAD(pagelist);
2119 #ifdef CONFIG_FINEGRAINED_THP
2120         XA_STATE_ORDER(xas, &mapping->i_pages, start, hpage_order);
2121 #else
2122         XA_STATE_ORDER(xas, &mapping->i_pages, start, HPAGE_PMD_ORDER);
2123 #endif
2124         int nr_none = 0, result = SCAN_SUCCEED;
2125         bool is_shmem = shmem_file(file);
2126
2127         VM_BUG_ON(!IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS) && !is_shmem);
2128 #ifdef CONFIG_FINEGRAINED_THP
2129         VM_BUG_ON(start & (hpage_nr - 1));
2130 #else
2131         VM_BUG_ON(start & (HPAGE_PMD_NR - 1));
2132 #endif
2133
2134         /* Only allocate from the target node */
2135         gfp = alloc_hugepage_khugepaged_gfpmask() | __GFP_THISNODE;
2136
2137 #ifdef CONFIG_FINEGRAINED_THP
2138         new_page = khugepaged_alloc_page(hpage, gfp, node, hpage_type);
2139 #else
2140         new_page = khugepaged_alloc_page(hpage, gfp, node);
2141 #endif
2142         if (!new_page) {
2143                 result = SCAN_ALLOC_HUGE_PAGE_FAIL;
2144                 goto out;
2145         }
2146
2147         if (unlikely(mem_cgroup_charge(new_page, mm, gfp))) {
2148                 result = SCAN_CGROUP_CHARGE_FAIL;
2149                 goto out;
2150         }
2151         count_memcg_page_event(new_page, THP_COLLAPSE_ALLOC);
2152
2153         /* This will be less messy when we use multi-index entries */
2154         do {
2155                 xas_lock_irq(&xas);
2156                 xas_create_range(&xas);
2157                 if (!xas_error(&xas))
2158                         break;
2159                 xas_unlock_irq(&xas);
2160                 if (!xas_nomem(&xas, GFP_KERNEL)) {
2161                         result = SCAN_FAIL;
2162                         goto out;
2163                 }
2164         } while (1);
2165
2166         __SetPageLocked(new_page);
2167         if (is_shmem)
2168                 __SetPageSwapBacked(new_page);
2169         new_page->index = start;
2170         new_page->mapping = mapping;
2171
2172         /*
2173          * At this point the new_page is locked and not up-to-date.
2174          * It's safe to insert it into the page cache, because nobody would
2175          * be able to map it or use it in another way until we unlock it.
2176          */
2177
2178         xas_set(&xas, start);
2179         for (index = start; index < end; index++) {
2180                 struct page *page = xas_next(&xas);
2181
2182                 VM_BUG_ON(index != xas.xa_index);
2183                 if (is_shmem) {
2184                         if (!page) {
2185                                 /*
2186                                  * Stop if extent has been truncated or
2187                                  * hole-punched, and is now completely
2188                                  * empty.
2189                                  */
2190                                 if (index == start) {
2191                                         if (!xas_next_entry(&xas, end - 1)) {
2192                                                 result = SCAN_TRUNCATED;
2193                                                 goto xa_locked;
2194                                         }
2195                                         xas_set(&xas, index);
2196                                 }
2197                                 if (!shmem_charge(mapping->host, 1)) {
2198                                         result = SCAN_FAIL;
2199                                         goto xa_locked;
2200                                 }
2201                                 xas_store(&xas, new_page);
2202                                 nr_none++;
2203                                 continue;
2204                         }
2205
2206                         if (xa_is_value(page) || !PageUptodate(page)) {
2207                                 xas_unlock_irq(&xas);
2208                                 /* swap in or instantiate fallocated page */
2209                                 if (shmem_getpage(mapping->host, index, &page,
2210                                                   SGP_NOHUGE)) {
2211                                         result = SCAN_FAIL;
2212                                         goto xa_unlocked;
2213                                 }
2214                         } else if (trylock_page(page)) {
2215                                 get_page(page);
2216                                 xas_unlock_irq(&xas);
2217                         } else {
2218                                 result = SCAN_PAGE_LOCK;
2219                                 goto xa_locked;
2220                         }
2221                 } else {        /* !is_shmem */
2222                         if (!page || xa_is_value(page)) {
2223                                 xas_unlock_irq(&xas);
2224                                 page_cache_sync_readahead(mapping, &file->f_ra,
2225                                                           file, index,
2226                                                           end - index);
2227                                 /* drain pagevecs to help isolate_lru_page() */
2228                                 lru_add_drain();
2229                                 page = find_lock_page(mapping, index);
2230                                 if (unlikely(page == NULL)) {
2231                                         result = SCAN_FAIL;
2232                                         goto xa_unlocked;
2233                                 }
2234                         } else if (PageDirty(page)) {
2235                                 /*
2236                                  * khugepaged only works on read-only fd,
2237                                  * so this page is dirty because it hasn't
2238                                  * been flushed since first write. There
2239                                  * won't be new dirty pages.
2240                                  *
2241                                  * Trigger async flush here and hope the
2242                                  * writeback is done when khugepaged
2243                                  * revisits this page.
2244                                  *
2245                                  * This is a one-off situation. We are not
2246                                  * forcing writeback in a loop.
2247                                  */
2248                                 xas_unlock_irq(&xas);
2249                                 filemap_flush(mapping);
2250                                 result = SCAN_FAIL;
2251                                 goto xa_unlocked;
2252                         } else if (PageWriteback(page)) {
2253                                 xas_unlock_irq(&xas);
2254                                 result = SCAN_FAIL;
2255                                 goto xa_unlocked;
2256                         } else if (trylock_page(page)) {
2257                                 get_page(page);
2258                                 xas_unlock_irq(&xas);
2259                         } else {
2260                                 result = SCAN_PAGE_LOCK;
2261                                 goto xa_locked;
2262                         }
2263                 }
2264
2265                 /*
2266                  * The page must be locked, so we can drop the i_pages lock
2267                  * without racing with truncate.
2268                  */
2269                 VM_BUG_ON_PAGE(!PageLocked(page), page);
2270
2271                 /* make sure the page is up to date */
2272                 if (unlikely(!PageUptodate(page))) {
2273                         result = SCAN_FAIL;
2274                         goto out_unlock;
2275                 }
2276
2277                 /*
2278                  * If file was truncated then extended, or hole-punched, before
2279                  * we locked the first page, then a THP might be there already.
2280                  */
2281                 if (PageTransCompound(page)) {
2282                         result = SCAN_PAGE_COMPOUND;
2283                         goto out_unlock;
2284                 }
2285
2286                 if (page_mapping(page) != mapping) {
2287                         result = SCAN_TRUNCATED;
2288                         goto out_unlock;
2289                 }
2290
2291                 if (!is_shmem && (PageDirty(page) ||
2292                                   PageWriteback(page))) {
2293                         /*
2294                          * khugepaged only works on read-only fd, so this
2295                          * page is dirty because it hasn't been flushed
2296                          * since first write.
2297                          */
2298                         result = SCAN_FAIL;
2299                         goto out_unlock;
2300                 }
2301
2302                 if (isolate_lru_page(page)) {
2303                         result = SCAN_DEL_PAGE_LRU;
2304                         goto out_unlock;
2305                 }
2306
2307                 if (page_has_private(page) &&
2308                     !try_to_release_page(page, GFP_KERNEL)) {
2309                         result = SCAN_PAGE_HAS_PRIVATE;
2310                         putback_lru_page(page);
2311                         goto out_unlock;
2312                 }
2313
2314                 if (page_mapped(page))
2315                         unmap_mapping_pages(mapping, index, 1, false);
2316
2317                 xas_lock_irq(&xas);
2318                 xas_set(&xas, index);
2319
2320                 VM_BUG_ON_PAGE(page != xas_load(&xas), page);
2321                 VM_BUG_ON_PAGE(page_mapped(page), page);
2322
2323                 /*
2324                  * The page is expected to have page_count() == 3:
2325                  *  - we hold a pin on it;
2326                  *  - one reference from page cache;
2327                  *  - one from isolate_lru_page;
2328                  */
2329                 if (!page_ref_freeze(page, 3)) {
2330                         result = SCAN_PAGE_COUNT;
2331                         xas_unlock_irq(&xas);
2332                         putback_lru_page(page);
2333                         goto out_unlock;
2334                 }
2335
2336                 /*
2337                  * Add the page to the list to be able to undo the collapse if
2338                  * something goes wrong.
2339                  */
2340                 list_add_tail(&page->lru, &pagelist);
2341
2342                 /* Finally, replace with the new page. */
2343                 xas_store(&xas, new_page);
2344                 continue;
2345 out_unlock:
2346                 unlock_page(page);
2347                 put_page(page);
2348                 goto xa_unlocked;
2349         }
2350
2351         if (is_shmem) {
2352 #ifdef CONFIG_FINEGRAINED_THP
2353                 if (hpage_type == THP_TYPE_64KB)
2354                         __inc_node_page_state(new_page, NR_SHMEM_64KB_THPS);
2355                 else
2356                         __inc_node_page_state(new_page, NR_SHMEM_THPS);
2357 #else /* CONFIG_FINEGRAINED_THP */
2358                 __inc_node_page_state(new_page, NR_SHMEM_THPS);
2359 #endif /* CONFIG_FINEGRAINED_THP */
2360         } else {
2361 #ifdef CONFIG_FINEGRAINED_THP
2362                 if (hpage_type == THP_TYPE_64KB)
2363                         __inc_node_page_state(new_page, NR_FILE_64KB_THPS);
2364                 else
2365                         __inc_node_page_state(new_page, NR_FILE_THPS);
2366 #else /* CONFIG_FINEGRAINED_THP */
2367                 __inc_node_page_state(new_page, NR_FILE_THPS);
2368 #endif /* CONFIG_FINEGRAINED_THP */
2369                 filemap_nr_thps_inc(mapping);
2370         }
2371
2372         if (nr_none) {
2373                 __mod_lruvec_page_state(new_page, NR_FILE_PAGES, nr_none);
2374                 if (is_shmem)
2375                         __mod_lruvec_page_state(new_page, NR_SHMEM, nr_none);
2376         }
2377
2378 xa_locked:
2379         xas_unlock_irq(&xas);
2380 xa_unlocked:
2381
2382         if (result == SCAN_SUCCEED) {
2383                 struct page *page, *tmp;
2384 #ifdef CONFIG_FINEGRAINED_THP
2385                 int offset = 0;
2386 #endif
2387
2388                 /*
2389                  * Replacing old pages with the new one has succeeded; now we
2390                  * need to copy the content and free the old pages.
2391                  */
2392                 index = start;
2393                 list_for_each_entry_safe(page, tmp, &pagelist, lru) {
2394 #ifdef CONFIG_FINEGRAINED_THP
2395                         if (hpage_type != THP_TYPE_64KB) {
2396                                 while (index < page->index) {
2397                                         clear_highpage(new_page + (index % HPAGE_PMD_NR));
2398                                         index++;
2399                                 }
2400                         }
2401
2402                         if (hpage_type == THP_TYPE_64KB) {
2403                                 copy_highpage(new_page + offset, page);
2404                                 offset++;
2405                         } else
2406                                 copy_highpage(new_page + (page->index % HPAGE_PMD_NR),
2407                                                 page);
2408 #else /* CONFIG_FINEGRAINED_THP */
2409                         while (index < page->index) {
2410                                 clear_highpage(new_page + (index % HPAGE_PMD_NR));
2411                                 index++;
2412                         }
2413                         copy_highpage(new_page + (page->index % HPAGE_PMD_NR),
2414                                         page);
2415 #endif /* CONFIG_FINEGRAINED_THP */
2416                         list_del(&page->lru);
2417                         page->mapping = NULL;
2418                         page_ref_unfreeze(page, 1);
2419                         ClearPageActive(page);
2420                         ClearPageUnevictable(page);
2421                         unlock_page(page);
2422                         put_page(page);
2423                         index++;
2424                 }
2425 #ifdef CONFIG_FINEGRAINED_THP
2426                 if (hpage_type == THP_TYPE_64KB) {
2427                         while (index < end) {
2428                                 clear_highpage(new_page + offset);
2429                                 offset++;
2430                                 index++;
2431                         }
2432                 } else {
2433                         while (index < end) {
2434                                 clear_highpage(new_page + (index % HPAGE_PMD_NR));
2435                                 index++;
2436                         }
2437                 }
2438 #else /* CONFIG_FINEGRAINED_THP */
2439                 while (index < end) {
2440                         clear_highpage(new_page + (index % HPAGE_PMD_NR));
2441                         index++;
2442                 }
2443 #endif /* CONFIG_FINEGRAINED_THP */
2444
2445                 SetPageUptodate(new_page);
2446 #ifdef CONFIG_FINEGRAINED_THP
2447                 page_ref_add(new_page, hpage_nr - 1);
2448 #else
2449                 page_ref_add(new_page, HPAGE_PMD_NR - 1);
2450 #endif
2451                 if (is_shmem)
2452                         set_page_dirty(new_page);
2453                 lru_cache_add(new_page);
2454
2455                 /*
2456                  * Remove pte page tables, so we can re-fault the page as huge.
2457                  */
2458 #ifdef CONFIG_FINEGRAINED_THP
2459                 retract_page_tables(mapping, start, hpage_type);
2460                 if (hpage_type == THP_TYPE_2MB)
2461                         *hpage = NULL;
2462 #else /* CONFIG_FINEGRAINED_THP */
2463                 retract_page_tables(mapping, start);
2464                 *hpage = NULL;
2465 #endif /* CONFIG_FINEGRAINED_THP */
2466                 khugepaged_pages_collapsed++;
2467         } else {
2468                 struct page *page;
2469
2470                 /* Something went wrong: roll back page cache changes */
2471                 xas_lock_irq(&xas);
2472                 mapping->nrpages -= nr_none;
2473
2474                 if (is_shmem)
2475                         shmem_uncharge(mapping->host, nr_none);
2476
2477                 xas_set(&xas, start);
2478                 xas_for_each(&xas, page, end - 1) {
2479                         page = list_first_entry_or_null(&pagelist,
2480                                         struct page, lru);
2481                         if (!page || xas.xa_index < page->index) {
2482                                 if (!nr_none)
2483                                         break;
2484                                 nr_none--;
2485                                 /* Put holes back where they were */
2486                                 xas_store(&xas, NULL);
2487                                 continue;
2488                         }
2489
2490                         VM_BUG_ON_PAGE(page->index != xas.xa_index, page);
2491
2492                         /* Unfreeze the page. */
2493                         list_del(&page->lru);
2494                         page_ref_unfreeze(page, 2);
2495                         xas_store(&xas, page);
2496                         xas_pause(&xas);
2497                         xas_unlock_irq(&xas);
2498                         unlock_page(page);
2499                         putback_lru_page(page);
2500                         xas_lock_irq(&xas);
2501                 }
2502                 VM_BUG_ON(nr_none);
2503                 xas_unlock_irq(&xas);
2504
2505                 new_page->mapping = NULL;
2506         }
2507
2508         unlock_page(new_page);
2509 out:
2510 #ifdef CONFIG_FINEGRAINED_THP
2511         if (result != SCAN_SUCCEED && new_page && hpage_type == THP_TYPE_64KB)
2512                 put_page(new_page);
2513 #endif
2514         VM_BUG_ON(!list_empty(&pagelist));
2515         if (!IS_ERR_OR_NULL(*hpage))
2516                 mem_cgroup_uncharge(*hpage);
2517         /* TODO: tracepoints */
2518 }
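/*
 * Editorial accounting example for collapse_file() above: on success the
 * new huge page fills every slot of the covered range, so
 * page_ref_add(new_page, nr - 1) (nr being HPAGE_PMD_NR or
 * HPAGE_CONT_PTE_NR) leaves one reference per page-cache slot plus the
 * caller's pin. On rollback, each old page was frozen expecting 3
 * references (local pin + page cache + lru isolation);
 * page_ref_unfreeze(page, 2) revives it with only the page-cache and
 * isolation references, implicitly consuming the local pin, and
 * putback_lru_page() then drops the isolation reference.
 */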
2519
2520 #ifdef CONFIG_FINEGRAINED_THP
2521 static void khugepaged_scan_file(struct mm_struct *mm,
2522                 struct file *file, pgoff_t start, struct page **hpage,
2523                 int hpage_type)
2524 #else /* CONFIG_FINEGRAINED_THP */
2525 static void khugepaged_scan_file(struct mm_struct *mm,
2526                 struct file *file, pgoff_t start, struct page **hpage)
2527 #endif /* CONFIG_FINEGRAINED_THP */
2528 {
2529         struct page *page = NULL;
2530         struct address_space *mapping = file->f_mapping;
2531         XA_STATE(xas, &mapping->i_pages, start);
2532         int present, swap;
2533         int node = NUMA_NO_NODE;
2534         int result = SCAN_SUCCEED;
2535 #ifdef CONFIG_FINEGRAINED_THP
2536         int hpage_nr;
2537         int max_ptes_swap, max_ptes_none, max_ptes_shared;
2538
2539         if (hpage_type == THP_TYPE_64KB) {
2540                 hpage_nr = HPAGE_CONT_PTE_NR; /* 64KB */
2541                 max_ptes_swap = khugepaged_max_ptes_swap_64kb;
2542                 max_ptes_none = khugepaged_max_ptes_none_64kb;
2543                 max_ptes_shared = khugepaged_max_ptes_shared_64kb;
2544         } else {
2545                 hpage_nr = HPAGE_PMD_NR; /* 2MB */
2546                 max_ptes_swap = khugepaged_max_ptes_swap;
2547                 max_ptes_none = khugepaged_max_ptes_none;
2548                 max_ptes_shared = khugepaged_max_ptes_shared;
2549         }
2550 #endif /* CONFIG_FINEGRAINED_THP */
2551
2552         present = 0;
2553         swap = 0;
2554         memset(khugepaged_node_load, 0, sizeof(khugepaged_node_load));
2555         rcu_read_lock();
2556 #ifdef CONFIG_FINEGRAINED_THP
2557         xas_for_each(&xas, page, start + hpage_nr - 1)
2558 #else
2559         xas_for_each(&xas, page, start + HPAGE_PMD_NR - 1)
2560 #endif
2561         {
2562                 if (xas_retry(&xas, page))
2563                         continue;
2564
2565                 if (xa_is_value(page)) {
2566 #ifdef CONFIG_FINEGRAINED_THP
2567                         if (++swap > max_ptes_swap)
2568 #else
2569                         if (++swap > khugepaged_max_ptes_swap)
2570 #endif
2571                         {
2572                                 result = SCAN_EXCEED_SWAP_PTE;
2573                                 break;
2574                         }
2575                         continue;
2576                 }
2577
2578                 if (PageTransCompound(page)) {
2579                         result = SCAN_PAGE_COMPOUND;
2580                         break;
2581                 }
2582
2583                 node = page_to_nid(page);
2584                 if (khugepaged_scan_abort(node)) {
2585                         result = SCAN_SCAN_ABORT;
2586                         break;
2587                 }
2588                 khugepaged_node_load[node]++;
2589
2590                 if (!PageLRU(page)) {
2591                         result = SCAN_PAGE_LRU;
2592                         break;
2593                 }
2594
2595                 if (page_count(page) !=
2596                     1 + page_mapcount(page) + page_has_private(page)) {
2597                         result = SCAN_PAGE_COUNT;
2598                         break;
2599                 }
2600
2601                 /*
2602                  * We probably should check if the page is referenced here, but
2603                  * nobody would transfer pte_young() to PageReferenced() for us.
2604                  * And rmap walk here is just too costly...
2605                  */
2606
2607                 present++;
2608
2609                 if (need_resched()) {
2610                         xas_pause(&xas);
2611                         cond_resched_rcu();
2612                 }
2613         }
2614         rcu_read_unlock();
2615
2616         if (result == SCAN_SUCCEED) {
2617 #ifdef CONFIG_FINEGRAINED_THP
2618                 if (present < hpage_nr - max_ptes_none)
2619 #else
2620                 if (present < HPAGE_PMD_NR - khugepaged_max_ptes_none)
2621 #endif
2622                 {
2623                         result = SCAN_EXCEED_NONE_PTE;
2624                 } else {
2625                         node = khugepaged_find_target_node();
2626 #ifdef CONFIG_FINEGRAINED_THP
2627                         collapse_file(mm, file, start, hpage, node, hpage_type);
2628 #else
2629                         collapse_file(mm, file, start, hpage, node);
2630 #endif
2631                 }
2632         }
2633
2634         /* TODO: tracepoints */
2635 }
2636 #else
2637 #ifdef CONFIG_FINEGRAINED_THP
2638 static void khugepaged_scan_file(struct mm_struct *mm,
2639                 struct file *file, pgoff_t start, struct page **hpage,
2640                 int hpage_type)
2641 #else /* CONFIG_FINEGRAINED_THP */
2642 static void khugepaged_scan_file(struct mm_struct *mm,
2643                 struct file *file, pgoff_t start, struct page **hpage)
2644 #endif /* CONFIG_FINEGRAINED_THP */
2645 {
2646         BUILD_BUG();
2647 }
2648
2649 static int khugepaged_collapse_pte_mapped_thps(struct mm_slot *mm_slot)
2650 {
2651         return 0;
2652 }
2653 #endif
2654
2655 #ifdef CONFIG_FINEGRAINED_THP
2656 /*
2657  * If the return value is > 0 (i.e. not THP_TYPE_FAIL), the vma can host a
2658  * hugepage and the calculated hugepage start/end are stored through the
2659  * hstart/hend pointers; otherwise the vma cannot host a hugepage.
2660  */
2661 static inline int hugepage_determine_htype(unsigned long vm_start,
2662                 unsigned long vm_end, unsigned long *hstart, unsigned long *hend)
{
2663         unsigned long start, end;
2664
2665         /* determine 2MB hugepage */
2666         start = (vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
2667         end = vm_end & HPAGE_PMD_MASK;
2668         if (start >= end) {
2669                 /* determine 64KB hugepage */
2670                 start = (vm_start + ~HPAGE_CONT_PTE_MASK) & HPAGE_CONT_PTE_MASK;
2671                 end = vm_end & HPAGE_CONT_PTE_MASK;
2672                 if (start >= end)
2673                         return THP_TYPE_FAIL;
2674                 *hstart = start;
2675                 *hend = end;
2676                 return THP_TYPE_64KB;
2677         }
2678         *hstart = start;
2679         *hend = end;
2680         return THP_TYPE_2MB;
2681 }
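/*
 * Worked example (editorial) for hugepage_determine_htype(): a VMA
 * spanning [0x123000, 0x589000) gives start = 0x200000 and
 * end = 0x400000 in the 2MB pass, so THP_TYPE_2MB is returned with that
 * range. A smaller VMA such as [0x123000, 0x150000) fails the 2MB pass
 * (start >= end) but the 64KB pass succeeds with [0x130000, 0x150000),
 * returning THP_TYPE_64KB.
 */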
2682
2683 enum {
2684         KHUGEPAGE_SCAN_CONTINUE,
2685         KHUGEPAGE_SCAN_BREAK,
2686         KHUGEPAGE_SCAN_BREAK_MMAP_LOCK,
2687 };
2688
2689 static unsigned int khugepaged_scan_vma(struct mm_struct *mm,
2690                         struct vm_area_struct *vma, struct page **hpage,
2691                         unsigned int pages, int *progress)
2692 {
2693         unsigned long hstart, hend;
2694         int hpage_type, ret;
2695         int hpage_size, hpage_nr;
2696
2697         if (!hugepage_vma_check(vma, vma->vm_flags))
2698                 return KHUGEPAGE_SCAN_CONTINUE;
2699
2700         hpage_type = hugepage_determine_htype(
2701                                 (vma->vm_start > khugepaged_scan.address) ?
2702                                 vma->vm_start : khugepaged_scan.address,
2703                                 vma->vm_end, &hstart, &hend);
2704
2705         if (hpage_type == THP_TYPE_FAIL)
2706                 return KHUGEPAGE_SCAN_CONTINUE;
2707         if (khugepaged_scan.address > hend)
2708                 return KHUGEPAGE_SCAN_CONTINUE;
2709         if (khugepaged_scan.address < hstart)
2710                 khugepaged_scan.address = hstart;
2711
2712         if (hpage_type == THP_TYPE_64KB) {
2713                 VM_BUG_ON(khugepaged_scan.address & ~HPAGE_CONT_PTE_MASK);
2714                 hpage_size = HPAGE_CONT_PTE_SIZE; /* 64KB */
2715                 hpage_nr = HPAGE_CONT_PTE_NR;
2716         } else if (hpage_type == THP_TYPE_2MB) {
2717                 VM_BUG_ON(khugepaged_scan.address & ~HPAGE_PMD_MASK);
2718                 hpage_size = HPAGE_PMD_SIZE; /* 2MB */
2719                 hpage_nr = HPAGE_PMD_NR;
2720                 if (IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS) && vma->vm_file &&
2721                     !IS_ALIGNED((vma->vm_start >> PAGE_SHIFT) - vma->vm_pgoff,
2722                                 HPAGE_PMD_NR)) {
2723                         /* fallback, vma or file not aligned to 2MB */
2724                         hpage_size = HPAGE_CONT_PTE_SIZE; /* 64KB */
2725                         hpage_nr = HPAGE_CONT_PTE_NR;
2726                         hpage_type = THP_TYPE_64KB;
2727                 }
2728         } else
2729                 BUG();
2730
2731         while (khugepaged_scan.address < hend) {
2732                 if (khugepaged_scan.address + hpage_size > hend) {
2733                         if (khugepaged_scan.address + HPAGE_CONT_PTE_SIZE < hend) {
2734                                 hpage_size = HPAGE_CONT_PTE_SIZE;
2735                                 hpage_nr = HPAGE_CONT_PTE_NR;
2736                                 hpage_type = THP_TYPE_64KB;
2737                         }
2738                 }
2739                 ret = 0;
2740                 cond_resched();
2741                 if (unlikely(khugepaged_test_exit(mm)))
2742                         return KHUGEPAGE_SCAN_BREAK;
2743
2744                 VM_BUG_ON(khugepaged_scan.address < hstart ||
2745                           khugepaged_scan.address + hpage_size > hend);
2747                 if (IS_ENABLED(CONFIG_SHMEM) && vma->vm_file) {
2748                         struct file *file = get_file(vma->vm_file);
2749                         pgoff_t pgoff = linear_page_index(vma,
2750                                         khugepaged_scan.address);
2751
2752                         mmap_read_unlock(mm);
2753                         ret = 1;
2754                         khugepaged_scan_file(mm, file, pgoff, hpage, hpage_type);
2755                         fput(file);
2756                 } else {
2757                         ret = khugepaged_scan_pmd(mm, vma,
2758                                         khugepaged_scan.address,
2759                                         hpage, hpage_type);
2760                 }
2761                 /* move to next address */
2762                 khugepaged_scan.address += hpage_size;
2763                 *progress += hpage_nr;
2764                 if (ret)
2765                         /* we released mmap_lock, so break the loop */
2766                         return KHUGEPAGE_SCAN_BREAK_MMAP_LOCK;
2767                 if (*progress >= pages)
2768                         return KHUGEPAGE_SCAN_BREAK;
2769         }
2770         return KHUGEPAGE_SCAN_CONTINUE;
2771 }
2772
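/*
 * Return values of khugepaged_scan_vma(), as handled by
 * khugepaged_scan_mm_slot() below:
 *
 *   KHUGEPAGE_SCAN_CONTINUE        - nothing to collapse in this vma; advance
 *                                    to vma->vm_next with mmap_lock still held
 *   KHUGEPAGE_SCAN_BREAK           - stop scanning this mm; the caller still
 *                                    holds mmap_lock and must release it
 *   KHUGEPAGE_SCAN_BREAK_MMAP_LOCK - stop scanning this mm; mmap_lock has
 *                                    already been dropped (file-backed scan)
 */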
2773 static struct thp_scan_hint *find_scan_hint(struct mm_slot *slot,
2774                                                                 unsigned long addr)
2775 {
2776         struct thp_scan_hint *hint;
2777
             /* @addr is unused for now; hints are keyed by mm_slot only */
2778         list_for_each_entry(hint, &khugepaged_scan.hint_list, hint_list) {
2779                 if (hint->slot == slot)
2780                         return hint;
2781         }
2782         return NULL;
2783 }
2784
2785 #ifdef CONFIG_THP_CONSERVATIVE
2786 /* caller must hold a proper mmap_lock */
2787 void khugepaged_mem_hook(struct mm_struct *mm, unsigned long addr,
2788                 long diff, const char *debug)
2789 {
2790         struct mm_slot *slot;
2791         struct vm_area_struct *vma;
2792         struct thp_scan_hint *hint;
2793         bool wakeup = false;
2794         bool retry = false;
2795
2796         vma = find_vma(mm, addr);
2797         if (!vma || !hugepage_vma_check(vma, vma->vm_flags))
2798                 return;
2799
2800 again:
2801         spin_lock(&khugepaged_mm_lock);
2802         slot = get_mm_slot(mm);
2803         if (!slot) {
2804                 /* make a new slot or bail out */
2805                 spin_unlock(&khugepaged_mm_lock);
2806                 if (retry)
2807                         return;
2808                 if (__khugepaged_enter(mm))
2809                         return;
2810                 retry = true;
2811                 goto again;
2812         }
2813
2814         hint = find_scan_hint(slot, addr);
2815         if (!hint) {
2816                 spin_unlock(&khugepaged_mm_lock);
2817                 hint = kzalloc(sizeof(struct thp_scan_hint), GFP_KERNEL);
                     if (!hint)
                             return; /* skip the hint on allocation failure */
2818                 hint->vma = vma;
2819                 hint->slot = slot;
2820                 hint->diff = 0;
2821                 hint->jiffies = jiffies;
2822                 spin_lock(&khugepaged_mm_lock);
2823                 list_add(&hint->hint_list, &khugepaged_scan.hint_list);
2824                 khugepaged_scan.nr_hint++;
2825         }
2826         hint->diff += diff;
2827         if (hint->diff >= HPAGE_CONT_PTE_SIZE)
2828                 wakeup = true;
2831         spin_unlock(&khugepaged_mm_lock);
2832
2833         /* enough change accumulated: wake khugepaged to begin a scan */
2834         if (wakeup)
2835                 wake_up_interruptible(&khugepaged_wait);
2837 }
2838 #else /* CONFIG_THP_CONSERVATIVE */
2839 void khugepaged_mem_hook(struct mm_struct *mm,
2840                         unsigned long addr, long diff, const char *debug)
2841 {}
2842 #endif /* CONFIG_THP_CONSERVATIVE */
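/*
 * Illustrative (hypothetical) call site for khugepaged_mem_hook(): a fault
 * handler that has just populated @nr new ptes in @vma could publish the
 * change with
 *
 *	khugepaged_mem_hook(vma->vm_mm, address, nr * PAGE_SIZE, __func__);
 *
 * Once an mm accumulates at least HPAGE_CONT_PTE_SIZE worth of diff,
 * khugepaged_mem_hook() wakes khugepaged, which prefers hinted mms in
 * khugepaged_scan_mm_slot().
 */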
2843
2844 static void clear_hint_list(struct mm_slot *slot)
2845 {
2846         struct thp_scan_hint *hint;

2847         hint = find_scan_hint(slot, 0);
2848         if (hint) {
2849                 list_del(&hint->hint_list);
2850                 kfree(hint);
2851                 khugepaged_scan.nr_hint--;
2852         }
2853 }
2854
2855 static struct thp_scan_hint *get_next_hint(void)
2856 {
2857         if (!list_empty(&khugepaged_scan.hint_list)) {
2858                 struct thp_scan_hint *hint = list_first_entry(
2859                                         &khugepaged_scan.hint_list,
2860                                         struct thp_scan_hint, hint_list);
2861                 list_del(&hint->hint_list);
2862                 khugepaged_scan.nr_hint--;
2863                 return hint;
2864         }
2865         return NULL;
2866 }
2867 #endif /* CONFIG_FINEGRAINED_THP */
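/*
 * Lifecycle of a thp_scan_hint, as implemented above: created or updated by
 * khugepaged_mem_hook(), consumed newest-first by get_next_hint() (list_add()
 * pushes to the head of the list) when khugepaged picks the next mm to scan,
 * and discarded by clear_hint_list() once its mm_slot has been selected or
 * torn down.
 */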
2868
2869 static unsigned int khugepaged_scan_mm_slot(unsigned int pages,
2870                                             struct page **hpage)
2871         __releases(&khugepaged_mm_lock)
2872         __acquires(&khugepaged_mm_lock)
2873 {
2874         struct mm_slot *mm_slot;
2875         struct mm_struct *mm;
2876         struct vm_area_struct *vma;
2877         int progress = 0;
2878
2879         VM_BUG_ON(!pages);
2880         lockdep_assert_held(&khugepaged_mm_lock);
2881
2882 #ifdef CONFIG_FINEGRAINED_THP
2883         if (khugepaged_scan.mm_slot)
2884                 mm_slot = khugepaged_scan.mm_slot;
2885         else if (!list_empty(&khugepaged_scan.hint_list)) {
2886                 struct thp_scan_hint *hint;
2889
2890 get_next_hint:
2891                 hint = get_next_hint();
2892                 if (!hint)
2893                         goto get_next_slot;
2894
2895                 mm_slot = hint->slot;
                     /* hint->diff and hint->jiffies carry profiling data only */
2898                 kfree(hint);
2899                 clear_hint_list(mm_slot);
2900
2901                 if (khugepaged_test_exit(mm_slot->mm))
2902                         goto get_next_hint;
2903                 khugepaged_scan.address = 0;
2904                 khugepaged_scan.mm_slot = mm_slot;
2905         } else {
2906 get_next_slot:
2907                 mm_slot = list_entry(khugepaged_scan.mm_head.next,
2908                                      struct mm_slot, mm_node);
2909                 clear_hint_list(mm_slot);
2910                 khugepaged_scan.address = 0;
2911                 khugepaged_scan.mm_slot = mm_slot;
2912         }
2913 #else /* CONFIG_FINEGRAINED_THP */
2914         if (khugepaged_scan.mm_slot)
2915                 mm_slot = khugepaged_scan.mm_slot;
2916         else {
2917                 mm_slot = list_entry(khugepaged_scan.mm_head.next,
2918                                      struct mm_slot, mm_node);
2919                 khugepaged_scan.address = 0;
2920                 khugepaged_scan.mm_slot = mm_slot;
2921         }
2922 #endif /* CONFIG_FINEGRAINED_THP */
2923         spin_unlock(&khugepaged_mm_lock);
2924         khugepaged_collapse_pte_mapped_thps(mm_slot);
2925
2926         mm = mm_slot->mm;
2927         /*
2928          * Don't wait for the mmap_lock (to avoid long wait times).  Just
2929          * move to the next mm on the list.
2930          */
2931         vma = NULL;
2932         if (unlikely(!mmap_read_trylock(mm)))
2933                 goto breakouterloop_mmap_lock;
2934         if (likely(!khugepaged_test_exit(mm)))
2935                 vma = find_vma(mm, khugepaged_scan.address);
2936
2937         progress++;
2938         for (; vma; vma = vma->vm_next) {
2939 #ifdef CONFIG_FINEGRAINED_THP
2940                 int ret;
2941 #else
2942                 unsigned long hstart, hend;
2943 #endif
2944
2945                 cond_resched();
2946                 if (unlikely(khugepaged_test_exit(mm))) {
2947                         progress++;
2948                         break;
2949                 }
2950 #ifdef CONFIG_FINEGRAINED_THP
2951                 ret = khugepaged_scan_vma(mm, vma, hpage, pages, &progress);
2952
2953                 if (ret == KHUGEPAGE_SCAN_CONTINUE) {
2954                         progress++;
2955                         continue;
2956                 } else if (ret == KHUGEPAGE_SCAN_BREAK)
2957                         goto breakouterloop;
2958                 else if (ret == KHUGEPAGE_SCAN_BREAK_MMAP_LOCK)
2959                         goto breakouterloop_mmap_lock;
2960 #else /* CONFIG_FINEGRAINED_THP */
2961                 if (!hugepage_vma_check(vma, vma->vm_flags)) {
2962 skip:
2963                         progress++;
2964                         continue;
2965                 }
2966                 hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
2967                 hend = vma->vm_end & HPAGE_PMD_MASK;
2968                 if (hstart >= hend)
2969                         goto skip;
2970                 if (khugepaged_scan.address > hend)
2971                         goto skip;
2972                 if (khugepaged_scan.address < hstart)
2973                         khugepaged_scan.address = hstart;
2974                 VM_BUG_ON(khugepaged_scan.address & ~HPAGE_PMD_MASK);
2975                 if (shmem_file(vma->vm_file) && !shmem_huge_enabled(vma))
2976                         goto skip;
2977
2978                 while (khugepaged_scan.address < hend) {
2979                         int ret;
2980                         cond_resched();
2981                         if (unlikely(khugepaged_test_exit(mm)))
2982                                 goto breakouterloop;
2983
2984                         VM_BUG_ON(khugepaged_scan.address < hstart ||
2985                                   khugepaged_scan.address + HPAGE_PMD_SIZE >
2986                                   hend);
2987                         if (IS_ENABLED(CONFIG_SHMEM) && vma->vm_file) {
2988                                 struct file *file = get_file(vma->vm_file);
2989                                 pgoff_t pgoff = linear_page_index(vma,
2990                                                 khugepaged_scan.address);
2991
2992                                 mmap_read_unlock(mm);
2993                                 ret = 1;
2994                                 khugepaged_scan_file(mm, file, pgoff, hpage);
2995                                 fput(file);
2996                         } else {
2997                                 ret = khugepaged_scan_pmd(mm, vma,
2998                                                 khugepaged_scan.address,
2999                                                 hpage);
3000                         }
3001                         /* move to next address */
3002                         khugepaged_scan.address += HPAGE_PMD_SIZE;
3003                         progress += HPAGE_PMD_NR;
3004                         if (ret)
3005                                 /* we released mmap_lock so break loop */
3006                                 goto breakouterloop_mmap_lock;
3007                         if (progress >= pages)
3008                                 goto breakouterloop;
3009                 }
3010 #endif /* CONFIG_FINEGRAINED_THP */
3011         }
3012 breakouterloop:
3013         mmap_read_unlock(mm); /* exit_mmap will destroy ptes after this */
3014 breakouterloop_mmap_lock:
3015
3016         spin_lock(&khugepaged_mm_lock);
3017         VM_BUG_ON(khugepaged_scan.mm_slot != mm_slot);
3018         /*
3019          * Release the current mm_slot if this mm is about to die, or
3020          * if we scanned all vmas of this mm.
3021          */
3022         if (khugepaged_test_exit(mm) || !vma) {
3023                 /*
3024                  * Make sure that if mm_users is reaching zero while
3025                  * khugepaged runs here, khugepaged_exit will find
3026                  * mm_slot not pointing to the exiting mm.
3027                  */
3028 #ifdef CONFIG_FINEGRAINED_THP
3029                 if (!list_empty(&khugepaged_scan.hint_list)) {
3032                         struct thp_scan_hint *hint;
3033                         struct mm_slot *next_slot;
3034
3035 get_next_hint2:
3036                         hint = get_next_hint();
3037
3038                         if (!hint) {
3039                                 /* no more hints */
3040                                 if (mm_slot->mm_node.next != &khugepaged_scan.mm_head)
3041                                         goto get_next_slot2;
3042                                 else
3043                                         goto end_loop;
3044                         }
3045
                             /* hint->diff and hint->jiffies are profiling-only */
3048                         next_slot = hint->slot;
3049                         kfree(hint);
3050
3051                         if (next_slot == mm_slot)
3052                                 goto get_next_hint2;
3053
3054                         if (!khugepaged_test_exit(next_slot->mm)) {
3055                                 list_move(&next_slot->mm_node, &mm_slot->mm_node);
3056                                 clear_hint_list(next_slot);
3057                         } else
3058                                 goto get_next_hint2;
3059
3060                         khugepaged_scan.mm_slot = next_slot;
3061                         khugepaged_scan.address = 0;
3062                 } else if (mm_slot->mm_node.next != &khugepaged_scan.mm_head) {
3063 get_next_slot2:
3064                         khugepaged_scan.mm_slot = list_entry(
3065                                 mm_slot->mm_node.next,
3066                                 struct mm_slot, mm_node);
3067                         clear_hint_list(khugepaged_scan.mm_slot);
3068                         khugepaged_scan.address = 0;
3069                 } else {
3070 end_loop:
3071                         khugepaged_scan.mm_slot = NULL;
3072                         khugepaged_full_scans++;
3073                 }
3074 #else /* CONFIG_FINEGRAINED_THP */
3075                 if (mm_slot->mm_node.next != &khugepaged_scan.mm_head) {
3076                         khugepaged_scan.mm_slot = list_entry(
3077                                 mm_slot->mm_node.next,
3078                                 struct mm_slot, mm_node);
3079                         khugepaged_scan.address = 0;
3080                 } else {
3081                         khugepaged_scan.mm_slot = NULL;
3082                         khugepaged_full_scans++;
3083                 }
3084 #endif /* CONFIG_FINEGRAINED_THP */
3085                 collect_mm_slot(mm_slot);
3086         }
3087
3088         return progress;
3089 }
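/*
 * Under CONFIG_FINEGRAINED_THP, khugepaged_scan_mm_slot() picks its work in
 * this order: (1) resume the mm_slot it was already scanning, (2) take the
 * mm named by the newest scan hint, (3) fall back to the head of
 * khugepaged_scan.mm_head.  Without it, only (1) and (3) apply.
 */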
3090
3091 static int khugepaged_has_work(void)
3092 {
3093         return !list_empty(&khugepaged_scan.mm_head) &&
3094                 khugepaged_enabled();
3095 }
3096
3097 static int khugepaged_wait_event(void)
3098 {
3099         return !list_empty(&khugepaged_scan.mm_head) ||
3100                 kthread_should_stop();
3101 }
3102
3103 static void khugepaged_do_scan(void)
3104 {
3105         struct page *hpage = NULL;
3106         unsigned int progress = 0, pass_through_head = 0;
3107         unsigned int pages = khugepaged_pages_to_scan;
3108         bool wait = true;
3109
3110         barrier(); /* write khugepaged_pages_to_scan to local stack */
3111
3112         lru_add_drain_all();
3113
3114         while (progress < pages) {
3115                 if (!khugepaged_prealloc_page(&hpage, &wait))
3116                         break;
3117
3118                 cond_resched();
3119
3120                 if (unlikely(kthread_should_stop() || try_to_freeze()))
3121                         break;
3122
3123                 spin_lock(&khugepaged_mm_lock);
3124                 if (!khugepaged_scan.mm_slot)
3125                         pass_through_head++;
3126                 if (khugepaged_has_work() &&
3127                     pass_through_head < 2)
3128                         progress += khugepaged_scan_mm_slot(pages - progress,
3129                                                             &hpage);
3130                 else
3131                         progress = pages;
3132                 spin_unlock(&khugepaged_mm_lock);
3133         }
3134
3135         if (!IS_ERR_OR_NULL(hpage))
3136                 put_page(hpage);
3137 }
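/*
 * khugepaged_do_scan() budgets one pass: it keeps calling
 * khugepaged_scan_mm_slot() until @pages ptes have been scanned, the
 * preallocated hpage cannot be refilled, or the slot list has been restarted
 * from its head twice (pass_through_head), which bounds the work done per
 * wakeup.
 */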
3138
3139 static bool khugepaged_should_wakeup(void)
3140 {
3141         return kthread_should_stop() ||
3142                time_after_eq(jiffies, khugepaged_sleep_expire);
3143 }
3144
3145 static void khugepaged_wait_work(void)
3146 {
3147         if (khugepaged_has_work()) {
3148                 const unsigned long scan_sleep_jiffies =
3149                         msecs_to_jiffies(khugepaged_scan_sleep_millisecs);
3150
3151                 if (!scan_sleep_jiffies)
3152                         return;
3153
3154                 khugepaged_sleep_expire = jiffies + scan_sleep_jiffies;
3155                 wait_event_freezable_timeout(khugepaged_wait,
3156                                              khugepaged_should_wakeup(),
3157                                              scan_sleep_jiffies);
3158                 return;
3159         }
3160
3161         if (khugepaged_enabled())
3162                 wait_event_freezable(khugepaged_wait, khugepaged_wait_event());
3163 }
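/*
 * Between passes khugepaged sleeps in one of two modes: with pending work it
 * takes a freezable timed nap of khugepaged_scan_sleep_millisecs (0 means
 * rescan immediately); with no registered mm it blocks until one shows up or
 * the thread is asked to stop.  In mainline kernels the nap length is
 * tunable, e.g.:
 *
 *	echo 10000 > /sys/kernel/mm/transparent_hugepage/khugepaged/scan_sleep_millisecs
 */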
3164
3165 #include <linux/delay.h>
3166 bool eager_allocation = false;
3167
3168 static int khugepaged(void *none)
3169 {
3170         struct mm_slot *mm_slot;
3171
3172         set_freezable();
3173         set_user_nice(current, MAX_NICE);
3174
3175         while (!kthread_should_stop()) {
3176                 khugepaged_do_scan();
3177                 khugepaged_wait_work();
3178         }
3179
3180         spin_lock(&khugepaged_mm_lock);
3181         mm_slot = khugepaged_scan.mm_slot;
3182         khugepaged_scan.mm_slot = NULL;
3183         if (mm_slot)
3184                 collect_mm_slot(mm_slot);
3185         spin_unlock(&khugepaged_mm_lock);
3186         return 0;
3187 }
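/*
 * Thread lifecycle: khugepaged is started and stopped only via
 * start_stop_khugepaged() under khugepaged_mutex; on the way out it detaches
 * and releases any mm_slot it was still scanning.
 */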
3188
3189 static void set_recommended_min_free_kbytes(void)
3190 {
3191         struct zone *zone;
3192         int nr_zones = 0;
3193         unsigned long recommended_min;
3194
3195         for_each_populated_zone(zone) {
3196                 /*
3197                  * We don't need to worry about fragmentation of
3198                  * ZONE_MOVABLE since it only has movable pages.
3199                  */
3200                 if (zone_idx(zone) > gfp_zone(GFP_USER))
3201                         continue;
3202
3203                 nr_zones++;
3204         }
3205
3206         /* Ensure 2 pageblocks are free to assist fragmentation avoidance */
3207         recommended_min = pageblock_nr_pages * nr_zones * 2;
3208
3209         /*
3210          * Make sure that on average at least two pageblocks are almost free
3211          * of another type, one for a migratetype to fall back to and a
3212          * second to avoid subsequent fallbacks of other types. There are 3
3213          * MIGRATE_TYPES we care about.
3214          */
3215         recommended_min += pageblock_nr_pages * nr_zones *
3216                            MIGRATE_PCPTYPES * MIGRATE_PCPTYPES;
3217
3218         /* never allow reserving more than 5% of lowmem */
3219         recommended_min = min(recommended_min,
3220                               (unsigned long) nr_free_buffer_pages() / 20);
3221         recommended_min <<= (PAGE_SHIFT-10);
3222
3223         if (recommended_min > min_free_kbytes) {
3224                 if (user_min_free_kbytes >= 0)
3225                         pr_info("raising min_free_kbytes from %d to %lu to help transparent hugepage allocations\n",
3226                                 min_free_kbytes, recommended_min);
3227
3228                 min_free_kbytes = recommended_min;
3229         }
3230         setup_per_zone_wmarks();
3231 }
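/*
 * Worked example with illustrative numbers (4KB base pages, 2MB pageblocks
 * so pageblock_nr_pages == 512, two populated zones below ZONE_MOVABLE,
 * MIGRATE_PCPTYPES == 3):
 *
 *	recommended_min = 512 * 2 * 2		=  2048 pages
 *	                + 512 * 2 * 3 * 3	= 11264 pages total
 *	11264 pages << (PAGE_SHIFT - 10)	= 45056 KB (~44MB)
 *
 * which is then capped at 5% of lowmem before being compared with
 * min_free_kbytes.
 */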
3232
3233 int start_stop_khugepaged(void)
3234 {
3235         int err = 0;
3236
3237         mutex_lock(&khugepaged_mutex);
3238         if (khugepaged_enabled()) {
3239                 if (!khugepaged_thread)
3240                         khugepaged_thread = kthread_run(khugepaged, NULL,
3241                                                         "khugepaged");
3242                 if (IS_ERR(khugepaged_thread)) {
3243                         pr_err("khugepaged: kthread_run(khugepaged) failed\n");
3244                         err = PTR_ERR(khugepaged_thread);
3245                         khugepaged_thread = NULL;
3246                         goto fail;
3247                 }
3248
3249                 if (!list_empty(&khugepaged_scan.mm_head))
3250                         wake_up_interruptible(&khugepaged_wait);
3251
3252                 set_recommended_min_free_kbytes();
3253         } else if (khugepaged_thread) {
3254                 kthread_stop(khugepaged_thread);
3255                 khugepaged_thread = NULL;
3256         }
3257 fail:
3258         mutex_unlock(&khugepaged_mutex);
3259         return err;
3260 }
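/*
 * Both start_stop_khugepaged() and khugepaged_min_free_kbytes_update() below
 * take khugepaged_mutex, so watermark updates cannot race with the thread
 * being started or stopped.
 */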
3261
3262 void khugepaged_min_free_kbytes_update(void)
3263 {
3264         mutex_lock(&khugepaged_mutex);
3265         if (khugepaged_enabled() && khugepaged_thread)
3266                 set_recommended_min_free_kbytes();
3267         mutex_unlock(&khugepaged_mutex);
3268 }