mm/khugepaged.c [platform/kernel/linux-rpi.git]
1 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
2
3 #include <linux/mm.h>
4 #include <linux/sched.h>
5 #include <linux/sched/mm.h>
6 #include <linux/sched/coredump.h>
7 #include <linux/mmu_notifier.h>
8 #include <linux/rmap.h>
9 #include <linux/swap.h>
10 #include <linux/mm_inline.h>
11 #include <linux/kthread.h>
12 #include <linux/khugepaged.h>
13 #include <linux/freezer.h>
14 #include <linux/mman.h>
15 #include <linux/hashtable.h>
16 #include <linux/userfaultfd_k.h>
17 #include <linux/page_idle.h>
18 #include <linux/swapops.h>
19 #include <linux/shmem_fs.h>
20
21 #include <asm/tlb.h>
22 #include <asm/pgalloc.h>
23 #include "internal.h"
24
25 enum scan_result {
26         SCAN_FAIL,
27         SCAN_SUCCEED,
28         SCAN_PMD_NULL,
29         SCAN_EXCEED_NONE_PTE,
30         SCAN_PTE_NON_PRESENT,
31         SCAN_PAGE_RO,
32         SCAN_LACK_REFERENCED_PAGE,
33         SCAN_PAGE_NULL,
34         SCAN_SCAN_ABORT,
35         SCAN_PAGE_COUNT,
36         SCAN_PAGE_LRU,
37         SCAN_PAGE_LOCK,
38         SCAN_PAGE_ANON,
39         SCAN_PAGE_COMPOUND,
40         SCAN_ANY_PROCESS,
41         SCAN_VMA_NULL,
42         SCAN_VMA_CHECK,
43         SCAN_ADDRESS_RANGE,
44         SCAN_SWAP_CACHE_PAGE,
45         SCAN_DEL_PAGE_LRU,
46         SCAN_ALLOC_HUGE_PAGE_FAIL,
47         SCAN_CGROUP_CHARGE_FAIL,
48         SCAN_EXCEED_SWAP_PTE,
49         SCAN_TRUNCATED,
50 };
51
52 #define CREATE_TRACE_POINTS
53 #include <trace/events/huge_memory.h>
54
55 /* default: scan 8*512 ptes (or vmas) every 10 seconds */
56 static unsigned int khugepaged_pages_to_scan __read_mostly;
57 static unsigned int khugepaged_pages_collapsed;
58 static unsigned int khugepaged_full_scans;
59 static unsigned int khugepaged_scan_sleep_millisecs __read_mostly = 10000;
60 /* during fragmentation poll the hugepage allocator once every minute */
61 static unsigned int khugepaged_alloc_sleep_millisecs __read_mostly = 60000;
62 static unsigned long khugepaged_sleep_expire;
63 static DEFINE_SPINLOCK(khugepaged_mm_lock);
64 static DECLARE_WAIT_QUEUE_HEAD(khugepaged_wait);
65 /*
66  * By default, collapse hugepages if there is at least one pte mapped
67  * like it would have been mapped had the vma been large enough during
68  * the page fault.
69  */
70 static unsigned int khugepaged_max_ptes_none __read_mostly;
71 static unsigned int khugepaged_max_ptes_swap __read_mostly;
72
73 #define MM_SLOTS_HASH_BITS 10
74 static __read_mostly DEFINE_HASHTABLE(mm_slots_hash, MM_SLOTS_HASH_BITS);
75
76 static struct kmem_cache *mm_slot_cache __read_mostly;
77
78 /**
79  * struct mm_slot - hash lookup from mm to mm_slot
80  * @hash: hash collision list
81  * @mm_node: khugepaged scan list headed in khugepaged_scan.mm_head
82  * @mm: the mm that this information is valid for
83  */
84 struct mm_slot {
85         struct hlist_node hash;
86         struct list_head mm_node;
87         struct mm_struct *mm;
88 };
89
90 /**
91  * struct khugepaged_scan - cursor for scanning
92  * @mm_head: the head of the mm list to scan
93  * @mm_slot: the current mm_slot we are scanning
94  * @address: the next address inside that to be scanned
95  *
96  * There is only the one khugepaged_scan instance of this cursor structure.
97  */
98 struct khugepaged_scan {
99         struct list_head mm_head;
100         struct mm_slot *mm_slot;
101         unsigned long address;
102 };
103
104 static struct khugepaged_scan khugepaged_scan = {
105         .mm_head = LIST_HEAD_INIT(khugepaged_scan.mm_head),
106 };
107
108 #ifdef CONFIG_SYSFS
109 static ssize_t scan_sleep_millisecs_show(struct kobject *kobj,
110                                          struct kobj_attribute *attr,
111                                          char *buf)
112 {
113         return sprintf(buf, "%u\n", khugepaged_scan_sleep_millisecs);
114 }
115
116 static ssize_t scan_sleep_millisecs_store(struct kobject *kobj,
117                                           struct kobj_attribute *attr,
118                                           const char *buf, size_t count)
119 {
120         unsigned long msecs;
121         int err;
122
123         err = kstrtoul(buf, 10, &msecs);
124         if (err || msecs > UINT_MAX)
125                 return -EINVAL;
126
127         khugepaged_scan_sleep_millisecs = msecs;
128         khugepaged_sleep_expire = 0;
129         wake_up_interruptible(&khugepaged_wait);
130
131         return count;
132 }
133 static struct kobj_attribute scan_sleep_millisecs_attr =
134         __ATTR(scan_sleep_millisecs, 0644, scan_sleep_millisecs_show,
135                scan_sleep_millisecs_store);
136
137 static ssize_t alloc_sleep_millisecs_show(struct kobject *kobj,
138                                           struct kobj_attribute *attr,
139                                           char *buf)
140 {
141         return sprintf(buf, "%u\n", khugepaged_alloc_sleep_millisecs);
142 }
143
144 static ssize_t alloc_sleep_millisecs_store(struct kobject *kobj,
145                                            struct kobj_attribute *attr,
146                                            const char *buf, size_t count)
147 {
148         unsigned long msecs;
149         int err;
150
151         err = kstrtoul(buf, 10, &msecs);
152         if (err || msecs > UINT_MAX)
153                 return -EINVAL;
154
155         khugepaged_alloc_sleep_millisecs = msecs;
156         khugepaged_sleep_expire = 0;
157         wake_up_interruptible(&khugepaged_wait);
158
159         return count;
160 }
161 static struct kobj_attribute alloc_sleep_millisecs_attr =
162         __ATTR(alloc_sleep_millisecs, 0644, alloc_sleep_millisecs_show,
163                alloc_sleep_millisecs_store);
164
165 static ssize_t pages_to_scan_show(struct kobject *kobj,
166                                   struct kobj_attribute *attr,
167                                   char *buf)
168 {
169         return sprintf(buf, "%u\n", khugepaged_pages_to_scan);
170 }
171 static ssize_t pages_to_scan_store(struct kobject *kobj,
172                                    struct kobj_attribute *attr,
173                                    const char *buf, size_t count)
174 {
175         int err;
176         unsigned long pages;
177
178         err = kstrtoul(buf, 10, &pages);
179         if (err || !pages || pages > UINT_MAX)
180                 return -EINVAL;
181
182         khugepaged_pages_to_scan = pages;
183
184         return count;
185 }
186 static struct kobj_attribute pages_to_scan_attr =
187         __ATTR(pages_to_scan, 0644, pages_to_scan_show,
188                pages_to_scan_store);
189
190 static ssize_t pages_collapsed_show(struct kobject *kobj,
191                                     struct kobj_attribute *attr,
192                                     char *buf)
193 {
194         return sprintf(buf, "%u\n", khugepaged_pages_collapsed);
195 }
196 static struct kobj_attribute pages_collapsed_attr =
197         __ATTR_RO(pages_collapsed);
198
199 static ssize_t full_scans_show(struct kobject *kobj,
200                                struct kobj_attribute *attr,
201                                char *buf)
202 {
203         return sprintf(buf, "%u\n", khugepaged_full_scans);
204 }
205 static struct kobj_attribute full_scans_attr =
206         __ATTR_RO(full_scans);
207
208 static ssize_t khugepaged_defrag_show(struct kobject *kobj,
209                                       struct kobj_attribute *attr, char *buf)
210 {
211         return single_hugepage_flag_show(kobj, attr, buf,
212                                 TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
213 }
214 static ssize_t khugepaged_defrag_store(struct kobject *kobj,
215                                        struct kobj_attribute *attr,
216                                        const char *buf, size_t count)
217 {
218         return single_hugepage_flag_store(kobj, attr, buf, count,
219                                  TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
220 }
221 static struct kobj_attribute khugepaged_defrag_attr =
222         __ATTR(defrag, 0644, khugepaged_defrag_show,
223                khugepaged_defrag_store);
224
225 /*
226  * max_ptes_none controls whether khugepaged should collapse hugepages
227  * over unmapped ptes, potentially increasing the memory footprint of
228  * the vmas it scans. When max_ptes_none is 0, khugepaged will not
229  * reduce the free memory available in the system as it runs.
230  * Increasing max_ptes_none instead potentially reduces the free
231  * memory in the system during the khugepaged scan.
232  */
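/*
 * max_ptes_none (like the other knobs in khugepaged_attr_group below) is
 * exposed through sysfs under /sys/kernel/mm/transparent_hugepage/khugepaged/.
 * As a usage sketch, writing 0 makes khugepaged collapse only ranges with
 * every pte already mapped:
 *
 *   echo 0 > /sys/kernel/mm/transparent_hugepage/khugepaged/max_ptes_none
 */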
233 static ssize_t khugepaged_max_ptes_none_show(struct kobject *kobj,
234                                              struct kobj_attribute *attr,
235                                              char *buf)
236 {
237         return sprintf(buf, "%u\n", khugepaged_max_ptes_none);
238 }
239 static ssize_t khugepaged_max_ptes_none_store(struct kobject *kobj,
240                                               struct kobj_attribute *attr,
241                                               const char *buf, size_t count)
242 {
243         int err;
244         unsigned long max_ptes_none;
245
246         err = kstrtoul(buf, 10, &max_ptes_none);
247         if (err || max_ptes_none > HPAGE_PMD_NR-1)
248                 return -EINVAL;
249
250         khugepaged_max_ptes_none = max_ptes_none;
251
252         return count;
253 }
254 static struct kobj_attribute khugepaged_max_ptes_none_attr =
255         __ATTR(max_ptes_none, 0644, khugepaged_max_ptes_none_show,
256                khugepaged_max_ptes_none_store);
257
258 static ssize_t khugepaged_max_ptes_swap_show(struct kobject *kobj,
259                                              struct kobj_attribute *attr,
260                                              char *buf)
261 {
262         return sprintf(buf, "%u\n", khugepaged_max_ptes_swap);
263 }
264
265 static ssize_t khugepaged_max_ptes_swap_store(struct kobject *kobj,
266                                               struct kobj_attribute *attr,
267                                               const char *buf, size_t count)
268 {
269         int err;
270         unsigned long max_ptes_swap;
271
272         err  = kstrtoul(buf, 10, &max_ptes_swap);
273         if (err || max_ptes_swap > HPAGE_PMD_NR-1)
274                 return -EINVAL;
275
276         khugepaged_max_ptes_swap = max_ptes_swap;
277
278         return count;
279 }
280
281 static struct kobj_attribute khugepaged_max_ptes_swap_attr =
282         __ATTR(max_ptes_swap, 0644, khugepaged_max_ptes_swap_show,
283                khugepaged_max_ptes_swap_store);
284
285 static struct attribute *khugepaged_attr[] = {
286         &khugepaged_defrag_attr.attr,
287         &khugepaged_max_ptes_none_attr.attr,
288         &pages_to_scan_attr.attr,
289         &pages_collapsed_attr.attr,
290         &full_scans_attr.attr,
291         &scan_sleep_millisecs_attr.attr,
292         &alloc_sleep_millisecs_attr.attr,
293         &khugepaged_max_ptes_swap_attr.attr,
294         NULL,
295 };
296
297 struct attribute_group khugepaged_attr_group = {
298         .attrs = khugepaged_attr,
299         .name = "khugepaged",
300 };
301 #endif /* CONFIG_SYSFS */
302
303 #define VM_NO_KHUGEPAGED (VM_SPECIAL | VM_HUGETLB)
304
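/*
 * hugepage_madvise - apply MADV_HUGEPAGE or MADV_NOHUGEPAGE to @vm_flags.
 * Clears the opposite flag; for MADV_HUGEPAGE the mm is also registered
 * with khugepaged right away instead of waiting for a page fault that may
 * never come.
 */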
305 int hugepage_madvise(struct vm_area_struct *vma,
306                      unsigned long *vm_flags, int advice)
307 {
308         switch (advice) {
309         case MADV_HUGEPAGE:
310 #ifdef CONFIG_S390
311                 /*
312                  * qemu blindly sets MADV_HUGEPAGE on all allocations, but s390
313                  * can't handle this properly after s390_enable_sie, so we simply
314                  * ignore the madvise to prevent qemu from causing a SIGSEGV.
315                  */
316                 if (mm_has_pgste(vma->vm_mm))
317                         return 0;
318 #endif
319                 *vm_flags &= ~VM_NOHUGEPAGE;
320                 *vm_flags |= VM_HUGEPAGE;
321                 /*
322                  * If the vma becomes suitable for khugepaged to scan,
323                  * register it here without waiting for a page fault that
324                  * may not happen any time soon.
325                  */
326                 if (!(*vm_flags & VM_NO_KHUGEPAGED) &&
327                                 khugepaged_enter_vma_merge(vma, *vm_flags))
328                         return -ENOMEM;
329                 break;
330         case MADV_NOHUGEPAGE:
331                 *vm_flags &= ~VM_HUGEPAGE;
332                 *vm_flags |= VM_NOHUGEPAGE;
333                 /*
334                  * Setting VM_NOHUGEPAGE will prevent khugepaged from scanning
335                  * this vma even if we leave the mm registered in khugepaged if
336                  * it got registered before VM_NOHUGEPAGE was set.
337                  */
338                 break;
339         }
340
341         return 0;
342 }
343
344 int __init khugepaged_init(void)
345 {
346         mm_slot_cache = kmem_cache_create("khugepaged_mm_slot",
347                                           sizeof(struct mm_slot),
348                                           __alignof__(struct mm_slot), 0, NULL);
349         if (!mm_slot_cache)
350                 return -ENOMEM;
351
352         khugepaged_pages_to_scan = HPAGE_PMD_NR * 8;
353         khugepaged_max_ptes_none = HPAGE_PMD_NR - 1;
354         khugepaged_max_ptes_swap = HPAGE_PMD_NR / 8;
355
356         return 0;
357 }
358
359 void __init khugepaged_destroy(void)
360 {
361         kmem_cache_destroy(mm_slot_cache);
362 }
363
364 static inline struct mm_slot *alloc_mm_slot(void)
365 {
366         if (!mm_slot_cache)     /* initialization failed */
367                 return NULL;
368         return kmem_cache_zalloc(mm_slot_cache, GFP_KERNEL);
369 }
370
371 static inline void free_mm_slot(struct mm_slot *mm_slot)
372 {
373         kmem_cache_free(mm_slot_cache, mm_slot);
374 }
375
376 static struct mm_slot *get_mm_slot(struct mm_struct *mm)
377 {
378         struct mm_slot *mm_slot;
379
380         hash_for_each_possible(mm_slots_hash, mm_slot, hash, (unsigned long)mm)
381                 if (mm == mm_slot->mm)
382                         return mm_slot;
383
384         return NULL;
385 }
386
387 static void insert_to_mm_slots_hash(struct mm_struct *mm,
388                                     struct mm_slot *mm_slot)
389 {
390         mm_slot->mm = mm;
391         hash_add(mm_slots_hash, &mm_slot->hash, (long)mm);
392 }
393
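/*
 * khugepaged_test_exit - true once mm_users has dropped to zero, i.e. the
 * mm is exiting and khugepaged must stop touching it.
 */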
394 static inline int khugepaged_test_exit(struct mm_struct *mm)
395 {
396         return atomic_read(&mm->mm_users) == 0;
397 }
398
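/*
 * __khugepaged_enter - register @mm for scanning: allocate an mm_slot,
 * hash it, append it to the scan list, pin the mm with mmgrab() and wake
 * khugepaged if the list was previously empty.
 */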
399 int __khugepaged_enter(struct mm_struct *mm)
400 {
401         struct mm_slot *mm_slot;
402         int wakeup;
403
404         mm_slot = alloc_mm_slot();
405         if (!mm_slot)
406                 return -ENOMEM;
407
408         /* __khugepaged_exit() must not run from under us */
409         VM_BUG_ON_MM(khugepaged_test_exit(mm), mm);
410         if (unlikely(test_and_set_bit(MMF_VM_HUGEPAGE, &mm->flags))) {
411                 free_mm_slot(mm_slot);
412                 return 0;
413         }
414
415         spin_lock(&khugepaged_mm_lock);
416         insert_to_mm_slots_hash(mm, mm_slot);
417         /*
418          * Insert just behind the scanning cursor, to let the area settle
419          * down a little.
420          */
421         wakeup = list_empty(&khugepaged_scan.mm_head);
422         list_add_tail(&mm_slot->mm_node, &khugepaged_scan.mm_head);
423         spin_unlock(&khugepaged_mm_lock);
424
425         mmgrab(mm);
426         if (wakeup)
427                 wake_up_interruptible(&khugepaged_wait);
428
429         return 0;
430 }
431
432 int khugepaged_enter_vma_merge(struct vm_area_struct *vma,
433                                unsigned long vm_flags)
434 {
435         unsigned long hstart, hend;
436         if (!vma->anon_vma)
437                 /*
438                  * Not yet faulted in so we will register later in the
439                  * page fault if needed.
440                  */
441                 return 0;
442         if (vma->vm_ops || (vm_flags & VM_NO_KHUGEPAGED))
443                 /* khugepaged not yet working on file or special mappings */
444                 return 0;
445         hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
446         hend = vma->vm_end & HPAGE_PMD_MASK;
447         if (hstart < hend)
448                 return khugepaged_enter(vma, vm_flags);
449         return 0;
450 }
451
452 void __khugepaged_exit(struct mm_struct *mm)
453 {
454         struct mm_slot *mm_slot;
455         int free = 0;
456
457         spin_lock(&khugepaged_mm_lock);
458         mm_slot = get_mm_slot(mm);
459         if (mm_slot && khugepaged_scan.mm_slot != mm_slot) {
460                 hash_del(&mm_slot->hash);
461                 list_del(&mm_slot->mm_node);
462                 free = 1;
463         }
464         spin_unlock(&khugepaged_mm_lock);
465
466         if (free) {
467                 clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
468                 free_mm_slot(mm_slot);
469                 mmdrop(mm);
470         } else if (mm_slot) {
471                 /*
472                  * This is required to serialize against
473                  * khugepaged_test_exit() (which is guaranteed to run
474                  * under mmap_sem read mode). Stop here (after we
475                  * return, all pagetables will be destroyed) until
476                  * khugepaged has finished working on the pagetables
477                  * under the mmap_sem.
478                  */
479                 down_write(&mm->mmap_sem);
480                 up_write(&mm->mmap_sem);
481         }
482 }
483
484 static void release_pte_page(struct page *page)
485 {
486         dec_node_page_state(page, NR_ISOLATED_ANON + page_is_file_cache(page));
487         unlock_page(page);
488         putback_lru_page(page);
489 }
490
491 static void release_pte_pages(pte_t *pte, pte_t *_pte)
492 {
493         while (--_pte >= pte) {
494                 pte_t pteval = *_pte;
495                 if (!pte_none(pteval) && !is_zero_pfn(pte_pfn(pteval)))
496                         release_pte_page(pte_page(pteval));
497         }
498 }
499
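/*
 * __collapse_huge_page_isolate - lock and isolate from the LRU every page
 * mapped by the HPAGE_PMD_NR ptes starting at @pte, undoing the work and
 * returning 0 if any page is in an unexpected state (extra reference,
 * failed isolation, too many none/zero ptes) or if the range is read-only
 * or lacks referenced pages.  Returns 1 when every page was isolated.
 */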
500 static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
501                                         unsigned long address,
502                                         pte_t *pte)
503 {
504         struct page *page = NULL;
505         pte_t *_pte;
506         int none_or_zero = 0, result = 0, referenced = 0;
507         bool writable = false;
508
509         for (_pte = pte; _pte < pte+HPAGE_PMD_NR;
510              _pte++, address += PAGE_SIZE) {
511                 pte_t pteval = *_pte;
512                 if (pte_none(pteval) || (pte_present(pteval) &&
513                                 is_zero_pfn(pte_pfn(pteval)))) {
514                         if (!userfaultfd_armed(vma) &&
515                             ++none_or_zero <= khugepaged_max_ptes_none) {
516                                 continue;
517                         } else {
518                                 result = SCAN_EXCEED_NONE_PTE;
519                                 goto out;
520                         }
521                 }
522                 if (!pte_present(pteval)) {
523                         result = SCAN_PTE_NON_PRESENT;
524                         goto out;
525                 }
526                 page = vm_normal_page(vma, address, pteval);
527                 if (unlikely(!page)) {
528                         result = SCAN_PAGE_NULL;
529                         goto out;
530                 }
531
532                 VM_BUG_ON_PAGE(PageCompound(page), page);
533                 VM_BUG_ON_PAGE(!PageAnon(page), page);
534
535                 /*
536                  * We can do it before isolate_lru_page because the
537                  * page can't be freed from under us. NOTE: PG_lock
538                  * is needed to serialize against split_huge_page
539                  * when invoked from the VM.
540                  */
541                 if (!trylock_page(page)) {
542                         result = SCAN_PAGE_LOCK;
543                         goto out;
544                 }
545
546                 /*
547                  * cannot use mapcount: can't collapse if there's a gup pin.
548                  * The page must only be referenced by the scanned process
549                  * and page swap cache.
550                  */
551                 if (page_count(page) != 1 + PageSwapCache(page)) {
552                         unlock_page(page);
553                         result = SCAN_PAGE_COUNT;
554                         goto out;
555                 }
556                 if (pte_write(pteval)) {
557                         writable = true;
558                 } else {
559                         if (PageSwapCache(page) &&
560                             !reuse_swap_page(page, NULL)) {
561                                 unlock_page(page);
562                                 result = SCAN_SWAP_CACHE_PAGE;
563                                 goto out;
564                         }
565                         /*
566                          * Page is not in the swap cache. It can be collapsed
567                          * into a THP.
568                          */
569                 }
570
571                 /*
572                  * Isolate the page to avoid collapsing a hugepage
573                  * currently in use by the VM.
574                  */
575                 if (isolate_lru_page(page)) {
576                         unlock_page(page);
577                         result = SCAN_DEL_PAGE_LRU;
578                         goto out;
579                 }
580                 inc_node_page_state(page,
581                                 NR_ISOLATED_ANON + page_is_file_cache(page));
582                 VM_BUG_ON_PAGE(!PageLocked(page), page);
583                 VM_BUG_ON_PAGE(PageLRU(page), page);
584
585                 /* There should be enough young ptes to collapse the page */
586                 if (pte_young(pteval) ||
587                     page_is_young(page) || PageReferenced(page) ||
588                     mmu_notifier_test_young(vma->vm_mm, address))
589                         referenced++;
590         }
591         if (likely(writable)) {
592                 if (likely(referenced)) {
593                         result = SCAN_SUCCEED;
594                         trace_mm_collapse_huge_page_isolate(page, none_or_zero,
595                                                             referenced, writable, result);
596                         return 1;
597                 }
598         } else {
599                 result = SCAN_PAGE_RO;
600         }
601
602 out:
603         release_pte_pages(pte, _pte);
604         trace_mm_collapse_huge_page_isolate(page, none_or_zero,
605                                             referenced, writable, result);
606         return 0;
607 }
608
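/*
 * __collapse_huge_page_copy - for each of the HPAGE_PMD_NR ptes, copy the
 * old page (or clear the destination for none/zero ptes) into the
 * corresponding subpage of the new huge page, then clear the pte and
 * release the old page's rmap, LRU isolation and swap cache references.
 */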
609 static void __collapse_huge_page_copy(pte_t *pte, struct page *page,
610                                       struct vm_area_struct *vma,
611                                       unsigned long address,
612                                       spinlock_t *ptl)
613 {
614         pte_t *_pte;
615         for (_pte = pte; _pte < pte + HPAGE_PMD_NR;
616                                 _pte++, page++, address += PAGE_SIZE) {
617                 pte_t pteval = *_pte;
618                 struct page *src_page;
619
620                 if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) {
621                         clear_user_highpage(page, address);
622                         add_mm_counter(vma->vm_mm, MM_ANONPAGES, 1);
623                         if (is_zero_pfn(pte_pfn(pteval))) {
624                                 /*
625                                  * ptl mostly unnecessary.
626                                  */
627                                 spin_lock(ptl);
628                                 /*
629                                  * paravirt calls inside pte_clear here are
630                                  * superfluous.
631                                  */
632                                 pte_clear(vma->vm_mm, address, _pte);
633                                 spin_unlock(ptl);
634                         }
635                 } else {
636                         src_page = pte_page(pteval);
637                         copy_user_highpage(page, src_page, address, vma);
638                         VM_BUG_ON_PAGE(page_mapcount(src_page) != 1, src_page);
639                         release_pte_page(src_page);
640                         /*
641                          * ptl mostly unnecessary, but preempt has to
642                          * be disabled to update the per-cpu stats
643                          * inside page_remove_rmap().
644                          */
645                         spin_lock(ptl);
646                         /*
647                          * paravirt calls inside pte_clear here are
648                          * superfluous.
649                          */
650                         pte_clear(vma->vm_mm, address, _pte);
651                         page_remove_rmap(src_page, false);
652                         spin_unlock(ptl);
653                         free_page_and_swap_cache(src_page);
654                 }
655         }
656 }
657
658 static void khugepaged_alloc_sleep(void)
659 {
660         DEFINE_WAIT(wait);
661
662         add_wait_queue(&khugepaged_wait, &wait);
663         freezable_schedule_timeout_interruptible(
664                 msecs_to_jiffies(khugepaged_alloc_sleep_millisecs));
665         remove_wait_queue(&khugepaged_wait, &wait);
666 }
667
668 static int khugepaged_node_load[MAX_NUMNODES];
669
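/*
 * khugepaged_scan_abort - when node_reclaim_mode is set, abort the scan
 * rather than allow a collapse that would mix pages from @nid with pages
 * from nodes farther away than RECLAIM_DISTANCE already recorded in
 * khugepaged_node_load[].
 */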
670 static bool khugepaged_scan_abort(int nid)
671 {
672         int i;
673
674         /*
675          * If node_reclaim_mode is disabled, then no extra effort is made to
676          * allocate memory locally.
677          */
678         if (!node_reclaim_mode)
679                 return false;
680
681         /* If there is a count for this node already, it must be acceptable */
682         if (khugepaged_node_load[nid])
683                 return false;
684
685         for (i = 0; i < MAX_NUMNODES; i++) {
686                 if (!khugepaged_node_load[i])
687                         continue;
688                 if (node_distance(nid, i) > RECLAIM_DISTANCE)
689                         return true;
690         }
691         return false;
692 }
693
694 /* Defrag for khugepaged will enter direct reclaim/compaction if necessary */
695 static inline gfp_t alloc_hugepage_khugepaged_gfpmask(void)
696 {
697         return khugepaged_defrag() ? GFP_TRANSHUGE : GFP_TRANSHUGE_LIGHT;
698 }
699
700 #ifdef CONFIG_NUMA
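/*
 * khugepaged_find_target_node - choose the node with the highest count in
 * khugepaged_node_load[]; on ties, rotate past the previously chosen node
 * so allocations are spread between equally loaded nodes.
 */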
701 static int khugepaged_find_target_node(void)
702 {
703         static int last_khugepaged_target_node = NUMA_NO_NODE;
704         int nid, target_node = 0, max_value = 0;
705
706         /* find first node with max normal pages hit */
707         for (nid = 0; nid < MAX_NUMNODES; nid++)
708                 if (khugepaged_node_load[nid] > max_value) {
709                         max_value = khugepaged_node_load[nid];
710                         target_node = nid;
711                 }
712
713         /* do some balancing if several nodes have the same hit record */
714         if (target_node <= last_khugepaged_target_node)
715                 for (nid = last_khugepaged_target_node + 1; nid < MAX_NUMNODES;
716                                 nid++)
717                         if (max_value == khugepaged_node_load[nid]) {
718                                 target_node = nid;
719                                 break;
720                         }
721
722         last_khugepaged_target_node = target_node;
723         return target_node;
724 }
725
726 static bool khugepaged_prealloc_page(struct page **hpage, bool *wait)
727 {
728         if (IS_ERR(*hpage)) {
729                 if (!*wait)
730                         return false;
731
732                 *wait = false;
733                 *hpage = NULL;
734                 khugepaged_alloc_sleep();
735         } else if (*hpage) {
736                 put_page(*hpage);
737                 *hpage = NULL;
738         }
739
740         return true;
741 }
742
743 static struct page *
744 khugepaged_alloc_page(struct page **hpage, gfp_t gfp, int node)
745 {
746         VM_BUG_ON_PAGE(*hpage, *hpage);
747
748         *hpage = __alloc_pages_node(node, gfp, HPAGE_PMD_ORDER);
749         if (unlikely(!*hpage)) {
750                 count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
751                 *hpage = ERR_PTR(-ENOMEM);
752                 return NULL;
753         }
754
755         prep_transhuge_page(*hpage);
756         count_vm_event(THP_COLLAPSE_ALLOC);
757         return *hpage;
758 }
759 #else
760 static int khugepaged_find_target_node(void)
761 {
762         return 0;
763 }
764
765 static inline struct page *alloc_khugepaged_hugepage(void)
766 {
767         struct page *page;
768
769         page = alloc_pages(alloc_hugepage_khugepaged_gfpmask(),
770                            HPAGE_PMD_ORDER);
771         if (page)
772                 prep_transhuge_page(page);
773         return page;
774 }
775
776 static struct page *khugepaged_alloc_hugepage(bool *wait)
777 {
778         struct page *hpage;
779
780         do {
781                 hpage = alloc_khugepaged_hugepage();
782                 if (!hpage) {
783                         count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
784                         if (!*wait)
785                                 return NULL;
786
787                         *wait = false;
788                         khugepaged_alloc_sleep();
789                 } else
790                         count_vm_event(THP_COLLAPSE_ALLOC);
791         } while (unlikely(!hpage) && likely(khugepaged_enabled()));
792
793         return hpage;
794 }
795
796 static bool khugepaged_prealloc_page(struct page **hpage, bool *wait)
797 {
798         if (!*hpage)
799                 *hpage = khugepaged_alloc_hugepage(wait);
800
801         if (unlikely(!*hpage))
802                 return false;
803
804         return true;
805 }
806
807 static struct page *
808 khugepaged_alloc_page(struct page **hpage, gfp_t gfp, int node)
809 {
810         VM_BUG_ON(!*hpage);
811
812         return  *hpage;
813 }
814 #endif
815
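/*
 * hugepage_vma_check - can khugepaged operate on this vma at all?
 * Requires THP enabled for the vma and no MMF_DISABLE_THP on the mm;
 * shmem mappings are accepted only with CONFIG_TRANSPARENT_HUGE_PAGECACHE
 * and suitable alignment, everything else must be a plain anonymous,
 * non-special, non-temporary-stack vma.
 */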
816 static bool hugepage_vma_check(struct vm_area_struct *vma)
817 {
818         if ((!(vma->vm_flags & VM_HUGEPAGE) && !khugepaged_always()) ||
819             (vma->vm_flags & VM_NOHUGEPAGE) ||
820             test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags))
821                 return false;
822         if (shmem_file(vma->vm_file)) {
823                 if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE))
824                         return false;
825                 return IS_ALIGNED((vma->vm_start >> PAGE_SHIFT) - vma->vm_pgoff,
826                                 HPAGE_PMD_NR);
827         }
828         if (!vma->anon_vma || vma->vm_ops)
829                 return false;
830         if (is_vma_temporary_stack(vma))
831                 return false;
832         return !(vma->vm_flags & VM_NO_KHUGEPAGED);
833 }
834
835 /*
836  * If mmap_sem temporarily dropped, revalidate vma
837  * before taking mmap_sem.
838  * Return 0 if succeeds, otherwise return none-zero
839  * value (scan code).
840  */
841
842 static int hugepage_vma_revalidate(struct mm_struct *mm, unsigned long address,
843                 struct vm_area_struct **vmap)
844 {
845         struct vm_area_struct *vma;
846         unsigned long hstart, hend;
847
848         if (unlikely(khugepaged_test_exit(mm)))
849                 return SCAN_ANY_PROCESS;
850
851         *vmap = vma = find_vma(mm, address);
852         if (!vma)
853                 return SCAN_VMA_NULL;
854
855         hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
856         hend = vma->vm_end & HPAGE_PMD_MASK;
857         if (address < hstart || address + HPAGE_PMD_SIZE > hend)
858                 return SCAN_ADDRESS_RANGE;
859         if (!hugepage_vma_check(vma))
860                 return SCAN_VMA_CHECK;
861         return 0;
862 }
863
864 /*
865  * Bring missing pages in from swap, to complete THP collapse.
866  * Only done if khugepaged_scan_pmd believes it is worthwhile.
867  *
868  * Called and returns without pte mapped or spinlocks held,
869  * but with mmap_sem held to protect against vma changes.
870  */
871
872 static bool __collapse_huge_page_swapin(struct mm_struct *mm,
873                                         struct vm_area_struct *vma,
874                                         unsigned long address, pmd_t *pmd,
875                                         int referenced)
876 {
877         int swapped_in = 0, ret = 0;
878         struct vm_fault vmf = {
879                 .vma = vma,
880                 .address = address,
881                 .flags = FAULT_FLAG_ALLOW_RETRY,
882                 .pmd = pmd,
883                 .pgoff = linear_page_index(vma, address),
884         };
885
886         /* we only decide to swap in if there are enough young ptes */
887         if (referenced < HPAGE_PMD_NR/2) {
888                 trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
889                 return false;
890         }
891         vmf.pte = pte_offset_map(pmd, address);
892         for (; vmf.address < address + HPAGE_PMD_NR*PAGE_SIZE;
893                         vmf.pte++, vmf.address += PAGE_SIZE) {
894                 vmf.orig_pte = *vmf.pte;
895                 if (!is_swap_pte(vmf.orig_pte))
896                         continue;
897                 swapped_in++;
898                 ret = do_swap_page(&vmf);
899
900                 /* do_swap_page returns VM_FAULT_RETRY with released mmap_sem */
901                 if (ret & VM_FAULT_RETRY) {
902                         down_read(&mm->mmap_sem);
903                         if (hugepage_vma_revalidate(mm, address, &vmf.vma)) {
904                                 /* vma is no longer available, don't continue to swapin */
905                                 trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
906                                 return false;
907                         }
908                         /* check if the pmd is still valid */
909                         if (mm_find_pmd(mm, address) != pmd) {
910                                 trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
911                                 return false;
912                         }
913                 }
914                 if (ret & VM_FAULT_ERROR) {
915                         trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
916                         return false;
917                 }
918                 /* pte is unmapped now, we need to map it */
919                 vmf.pte = pte_offset_map(pmd, vmf.address);
920         }
921         vmf.pte--;
922         pte_unmap(vmf.pte);
923         trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 1);
924         return true;
925 }
926
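/*
 * collapse_huge_page - perform the collapse of one pmd-aligned range:
 * allocate and charge the huge page with the mmap_sem read lock dropped,
 * swap in missing ptes, then retake the mmap_sem for write, flush the
 * pmd, isolate and copy the small pages and finally install the huge
 * pmd.  Called with the mmap_sem held for read and returns with it
 * released.
 */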
927 static void collapse_huge_page(struct mm_struct *mm,
928                                    unsigned long address,
929                                    struct page **hpage,
930                                    int node, int referenced)
931 {
932         pmd_t *pmd, _pmd;
933         pte_t *pte;
934         pgtable_t pgtable;
935         struct page *new_page;
936         spinlock_t *pmd_ptl, *pte_ptl;
937         int isolated = 0, result = 0;
938         struct mem_cgroup *memcg;
939         struct vm_area_struct *vma;
940         unsigned long mmun_start;       /* For mmu_notifiers */
941         unsigned long mmun_end;         /* For mmu_notifiers */
942         gfp_t gfp;
943
944         VM_BUG_ON(address & ~HPAGE_PMD_MASK);
945
946         /* Only allocate from the target node */
947         gfp = alloc_hugepage_khugepaged_gfpmask() | __GFP_THISNODE;
948
949         /*
950          * Before allocating the hugepage, release the mmap_sem read lock.
951          * The allocation can take potentially a long time if it involves
952          * sync compaction, and we do not need to hold the mmap_sem during
953          * that. We will recheck the vma after taking it again in write mode.
954          */
955         up_read(&mm->mmap_sem);
956         new_page = khugepaged_alloc_page(hpage, gfp, node);
957         if (!new_page) {
958                 result = SCAN_ALLOC_HUGE_PAGE_FAIL;
959                 goto out_nolock;
960         }
961
962         if (unlikely(mem_cgroup_try_charge(new_page, mm, gfp, &memcg, true))) {
963                 result = SCAN_CGROUP_CHARGE_FAIL;
964                 goto out_nolock;
965         }
966
967         down_read(&mm->mmap_sem);
968         result = hugepage_vma_revalidate(mm, address, &vma);
969         if (result) {
970                 mem_cgroup_cancel_charge(new_page, memcg, true);
971                 up_read(&mm->mmap_sem);
972                 goto out_nolock;
973         }
974
975         pmd = mm_find_pmd(mm, address);
976         if (!pmd) {
977                 result = SCAN_PMD_NULL;
978                 mem_cgroup_cancel_charge(new_page, memcg, true);
979                 up_read(&mm->mmap_sem);
980                 goto out_nolock;
981         }
982
983         /*
984          * __collapse_huge_page_swapin always returns with mmap_sem locked.
985          * If it fails, we release the mmap_sem and jump to out_nolock.
986          * Continuing to collapse causes inconsistency.
987          */
988         if (!__collapse_huge_page_swapin(mm, vma, address, pmd, referenced)) {
989                 mem_cgroup_cancel_charge(new_page, memcg, true);
990                 up_read(&mm->mmap_sem);
991                 goto out_nolock;
992         }
993
994         up_read(&mm->mmap_sem);
995         /*
996          * Prevent all access to the pagetables, with the exception of
997          * gup_fast (handled later by the pmdp_collapse_flush below) and
998          * rmap walks from the VM (handled by the anon_vma lock + PG_lock).
999          */
1000         down_write(&mm->mmap_sem);
1001         result = hugepage_vma_revalidate(mm, address, &vma);
1002         if (result)
1003                 goto out;
1004         /* check if the pmd is still valid */
1005         if (mm_find_pmd(mm, address) != pmd)
1006                 goto out;
1007
1008         anon_vma_lock_write(vma->anon_vma);
1009
1010         pte = pte_offset_map(pmd, address);
1011         pte_ptl = pte_lockptr(mm, pmd);
1012
1013         mmun_start = address;
1014         mmun_end   = address + HPAGE_PMD_SIZE;
1015         mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
1016         pmd_ptl = pmd_lock(mm, pmd); /* probably unnecessary */
1017         /*
1018          * After this gup_fast can't run anymore. This also removes
1019          * any huge TLB entry from the CPU so we won't allow
1020          * huge and small TLB entries for the same virtual address
1021          * to avoid the risk of CPU bugs in that area.
1022          */
1023         _pmd = pmdp_collapse_flush(vma, address, pmd);
1024         spin_unlock(pmd_ptl);
1025         mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
1026
1027         spin_lock(pte_ptl);
1028         isolated = __collapse_huge_page_isolate(vma, address, pte);
1029         spin_unlock(pte_ptl);
1030
1031         if (unlikely(!isolated)) {
1032                 pte_unmap(pte);
1033                 spin_lock(pmd_ptl);
1034                 BUG_ON(!pmd_none(*pmd));
1035                 /*
1036                  * We can only use set_pmd_at when establishing
1037                  * hugepmds and never for establishing regular pmds that
1038                  * point to regular pagetables. Use pmd_populate for that.
1039                  */
1040                 pmd_populate(mm, pmd, pmd_pgtable(_pmd));
1041                 spin_unlock(pmd_ptl);
1042                 anon_vma_unlock_write(vma->anon_vma);
1043                 result = SCAN_FAIL;
1044                 goto out;
1045         }
1046
1047         /*
1048          * All pages are isolated and locked so anon_vma rmap
1049          * can't run anymore.
1050          */
1051         anon_vma_unlock_write(vma->anon_vma);
1052
1053         __collapse_huge_page_copy(pte, new_page, vma, address, pte_ptl);
1054         pte_unmap(pte);
1055         __SetPageUptodate(new_page);
1056         pgtable = pmd_pgtable(_pmd);
1057
1058         _pmd = mk_huge_pmd(new_page, vma->vm_page_prot);
1059         _pmd = maybe_pmd_mkwrite(pmd_mkdirty(_pmd), vma);
1060
1061         /*
1062          * spin_lock() below is not the equivalent of smp_wmb(), so
1063          * this is needed to prevent the __collapse_huge_page_copy writes
1064          * from becoming visible after the set_pmd_at() write.
1065          */
1066         smp_wmb();
1067
1068         spin_lock(pmd_ptl);
1069         BUG_ON(!pmd_none(*pmd));
1070         page_add_new_anon_rmap(new_page, vma, address, true);
1071         mem_cgroup_commit_charge(new_page, memcg, false, true);
1072         lru_cache_add_active_or_unevictable(new_page, vma);
1073         pgtable_trans_huge_deposit(mm, pmd, pgtable);
1074         set_pmd_at(mm, address, pmd, _pmd);
1075         update_mmu_cache_pmd(vma, address, pmd);
1076         spin_unlock(pmd_ptl);
1077
1078         *hpage = NULL;
1079
1080         khugepaged_pages_collapsed++;
1081         result = SCAN_SUCCEED;
1082 out_up_write:
1083         up_write(&mm->mmap_sem);
1084 out_nolock:
1085         trace_mm_collapse_huge_page(mm, isolated, result);
1086         return;
1087 out:
1088         mem_cgroup_cancel_charge(new_page, memcg, true);
1089         goto out_up_write;
1090 }
1091
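/*
 * khugepaged_scan_pmd - scan one pmd-aligned range and decide whether it
 * is worth collapsing: count swap, none/zero and young ptes against the
 * tunables, record per-node page counts in khugepaged_node_load[], and
 * on success call collapse_huge_page() (which releases the mmap_sem).
 * Returns 1 if a collapse was attempted, 0 otherwise.
 */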
1092 static int khugepaged_scan_pmd(struct mm_struct *mm,
1093                                struct vm_area_struct *vma,
1094                                unsigned long address,
1095                                struct page **hpage)
1096 {
1097         pmd_t *pmd;
1098         pte_t *pte, *_pte;
1099         int ret = 0, none_or_zero = 0, result = 0, referenced = 0;
1100         struct page *page = NULL;
1101         unsigned long _address;
1102         spinlock_t *ptl;
1103         int node = NUMA_NO_NODE, unmapped = 0;
1104         bool writable = false;
1105
1106         VM_BUG_ON(address & ~HPAGE_PMD_MASK);
1107
1108         pmd = mm_find_pmd(mm, address);
1109         if (!pmd) {
1110                 result = SCAN_PMD_NULL;
1111                 goto out;
1112         }
1113
1114         memset(khugepaged_node_load, 0, sizeof(khugepaged_node_load));
1115         pte = pte_offset_map_lock(mm, pmd, address, &ptl);
1116         for (_address = address, _pte = pte; _pte < pte+HPAGE_PMD_NR;
1117              _pte++, _address += PAGE_SIZE) {
1118                 pte_t pteval = *_pte;
1119                 if (is_swap_pte(pteval)) {
1120                         if (++unmapped <= khugepaged_max_ptes_swap) {
1121                                 continue;
1122                         } else {
1123                                 result = SCAN_EXCEED_SWAP_PTE;
1124                                 goto out_unmap;
1125                         }
1126                 }
1127                 if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) {
1128                         if (!userfaultfd_armed(vma) &&
1129                             ++none_or_zero <= khugepaged_max_ptes_none) {
1130                                 continue;
1131                         } else {
1132                                 result = SCAN_EXCEED_NONE_PTE;
1133                                 goto out_unmap;
1134                         }
1135                 }
1136                 if (!pte_present(pteval)) {
1137                         result = SCAN_PTE_NON_PRESENT;
1138                         goto out_unmap;
1139                 }
1140                 if (pte_write(pteval))
1141                         writable = true;
1142
1143                 page = vm_normal_page(vma, _address, pteval);
1144                 if (unlikely(!page)) {
1145                         result = SCAN_PAGE_NULL;
1146                         goto out_unmap;
1147                 }
1148
1149                 /* TODO: teach khugepaged to collapse THP mapped with pte */
1150                 if (PageCompound(page)) {
1151                         result = SCAN_PAGE_COMPOUND;
1152                         goto out_unmap;
1153                 }
1154
1155                 /*
1156                  * Record which node the original page is from and save this
1157                  * information to khugepaged_node_load[].
1158                  * Khugepaged will allocate the hugepage from the node that
1159                  * has the max hit record.
1160                  */
1161                 node = page_to_nid(page);
1162                 if (khugepaged_scan_abort(node)) {
1163                         result = SCAN_SCAN_ABORT;
1164                         goto out_unmap;
1165                 }
1166                 khugepaged_node_load[node]++;
1167                 if (!PageLRU(page)) {
1168                         result = SCAN_PAGE_LRU;
1169                         goto out_unmap;
1170                 }
1171                 if (PageLocked(page)) {
1172                         result = SCAN_PAGE_LOCK;
1173                         goto out_unmap;
1174                 }
1175                 if (!PageAnon(page)) {
1176                         result = SCAN_PAGE_ANON;
1177                         goto out_unmap;
1178                 }
1179
1180                 /*
1181                  * cannot use mapcount: can't collapse if there's a gup pin.
1182                  * The page must only be referenced by the scanned process
1183                  * and page swap cache.
1184                  */
1185                 if (page_count(page) != 1 + PageSwapCache(page)) {
1186                         result = SCAN_PAGE_COUNT;
1187                         goto out_unmap;
1188                 }
1189                 if (pte_young(pteval) ||
1190                     page_is_young(page) || PageReferenced(page) ||
1191                     mmu_notifier_test_young(vma->vm_mm, address))
1192                         referenced++;
1193         }
1194         if (writable) {
1195                 if (referenced) {
1196                         result = SCAN_SUCCEED;
1197                         ret = 1;
1198                 } else {
1199                         result = SCAN_LACK_REFERENCED_PAGE;
1200                 }
1201         } else {
1202                 result = SCAN_PAGE_RO;
1203         }
1204 out_unmap:
1205         pte_unmap_unlock(pte, ptl);
1206         if (ret) {
1207                 node = khugepaged_find_target_node();
1208                 /* collapse_huge_page will return with the mmap_sem released */
1209                 collapse_huge_page(mm, address, hpage, node, referenced);
1210         }
1211 out:
1212         trace_mm_khugepaged_scan_pmd(mm, page, writable, referenced,
1213                                      none_or_zero, result, unmapped);
1214         return ret;
1215 }
1216
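/*
 * collect_mm_slot - called under khugepaged_mm_lock; if the mm has already
 * exited, unhash and free its mm_slot and drop the reference taken by
 * __khugepaged_enter().
 */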
1217 static void collect_mm_slot(struct mm_slot *mm_slot)
1218 {
1219         struct mm_struct *mm = mm_slot->mm;
1220
1221         VM_BUG_ON(NR_CPUS != 1 && !spin_is_locked(&khugepaged_mm_lock));
1222
1223         if (khugepaged_test_exit(mm)) {
1224                 /* free mm_slot */
1225                 hash_del(&mm_slot->hash);
1226                 list_del(&mm_slot->mm_node);
1227
1228                 /*
1229                  * Not strictly needed because the mm exited already.
1230                  *
1231                  * clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
1232                  */
1233
1234                 /* khugepaged_mm_lock actually not necessary for the below */
1235                 free_mm_slot(mm_slot);
1236                 mmdrop(mm);
1237         }
1238 }
1239
1240 #if defined(CONFIG_SHMEM) && defined(CONFIG_TRANSPARENT_HUGE_PAGECACHE)
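/*
 * retract_page_tables - for each vma without an anon_vma that maps the
 * pmd-aligned range at @pgoff, try to take its mmap_sem for write and
 * clear the (assumed empty) pte page table so the range can be re-faulted
 * as a huge page; vmas whose trylock fails are simply skipped.
 */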
1241 static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff)
1242 {
1243         struct vm_area_struct *vma;
1244         unsigned long addr;
1245         pmd_t *pmd, _pmd;
1246
1247         i_mmap_lock_write(mapping);
1248         vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
1249                 /* probably overkill */
1250                 if (vma->anon_vma)
1251                         continue;
1252                 addr = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
1253                 if (addr & ~HPAGE_PMD_MASK)
1254                         continue;
1255                 if (vma->vm_end < addr + HPAGE_PMD_SIZE)
1256                         continue;
1257                 pmd = mm_find_pmd(vma->vm_mm, addr);
1258                 if (!pmd)
1259                         continue;
1260                 /*
1261                  * We need exclusive mmap_sem to retract page table.
1262                  * If trylock fails we would end up with pte-mapped THP after
1263                  * re-fault. Not ideal, but it's more important to not disturb
1264                  * the system too much.
1265                  */
1266                 if (down_write_trylock(&vma->vm_mm->mmap_sem)) {
1267                         spinlock_t *ptl = pmd_lock(vma->vm_mm, pmd);
1268                         /* assume page table is clear */
1269                         _pmd = pmdp_collapse_flush(vma, addr, pmd);
1270                         spin_unlock(ptl);
1271                         up_write(&vma->vm_mm->mmap_sem);
1272                         atomic_long_dec(&vma->vm_mm->nr_ptes);
1273                         pte_free(vma->vm_mm, pmd_pgtable(_pmd));
1274                 }
1275         }
1276         i_mmap_unlock_write(mapping);
1277 }
1278
1279 /**
1280  * collapse_shmem - collapse small tmpfs/shmem pages into a huge one.
1281  *
1282  * Basic scheme is simple, details are more complex:
1283  *  - allocate and freeze a new huge page;
1284  *  - scan over the radix tree, replacing old pages with the new one
1285  *    + swap in pages if necessary;
1286  *    + fill in gaps;
1287  *    + keep old pages around in case rollback is required;
1288  *  - if replacing succeeds:
1289  *    + copy data over;
1290  *    + free old pages;
1291  *    + unfreeze huge page;
1292  *  - if replacing fails:
1293  *    + put all pages back and unfreeze them;
1294  *    + restore gaps in the radix-tree;
1295  *    + free huge page;
1296  */
1297 static void collapse_shmem(struct mm_struct *mm,
1298                 struct address_space *mapping, pgoff_t start,
1299                 struct page **hpage, int node)
1300 {
1301         gfp_t gfp;
1302         struct page *page, *new_page, *tmp;
1303         struct mem_cgroup *memcg;
1304         pgoff_t index, end = start + HPAGE_PMD_NR;
1305         LIST_HEAD(pagelist);
1306         struct radix_tree_iter iter;
1307         void **slot;
1308         int nr_none = 0, result = SCAN_SUCCEED;
1309
1310         VM_BUG_ON(start & (HPAGE_PMD_NR - 1));
1311
1312         /* Only allocate from the target node */
1313         gfp = alloc_hugepage_khugepaged_gfpmask() | __GFP_THISNODE;
1314
1315         new_page = khugepaged_alloc_page(hpage, gfp, node);
1316         if (!new_page) {
1317                 result = SCAN_ALLOC_HUGE_PAGE_FAIL;
1318                 goto out;
1319         }
1320
1321         if (unlikely(mem_cgroup_try_charge(new_page, mm, gfp, &memcg, true))) {
1322                 result = SCAN_CGROUP_CHARGE_FAIL;
1323                 goto out;
1324         }
1325
1326         new_page->index = start;
1327         new_page->mapping = mapping;
1328         __SetPageSwapBacked(new_page);
1329         __SetPageLocked(new_page);
1330         BUG_ON(!page_ref_freeze(new_page, 1));
1331
1332
1333         /*
1334          * At this point the new_page is 'frozen' (page_count() is zero), locked
1335          * and not up-to-date. It's safe to insert it into the radix tree, because
1336          * nobody would be able to map it or use it in any other way until we
1337          * unfreeze it.
1338          */
1339
1340         index = start;
1341         spin_lock_irq(&mapping->tree_lock);
1342         radix_tree_for_each_slot(slot, &mapping->page_tree, &iter, start) {
1343                 int n = min(iter.index, end) - index;
1344
1345                 /*
1346                  * Handle holes in the radix tree: charge them to shmem and
1347                  * insert the relevant subpages of new_page into the radix tree.
1348                  */
1349                 if (n && !shmem_charge(mapping->host, n)) {
1350                         result = SCAN_FAIL;
1351                         break;
1352                 }
1353                 nr_none += n;
1354                 for (; index < min(iter.index, end); index++) {
1355                         radix_tree_insert(&mapping->page_tree, index,
1356                                         new_page + (index % HPAGE_PMD_NR));
1357                 }
1358
1359                 /* We are done. */
1360                 if (index >= end)
1361                         break;
1362
1363                 page = radix_tree_deref_slot_protected(slot,
1364                                 &mapping->tree_lock);
1365                 if (radix_tree_exceptional_entry(page) || !PageUptodate(page)) {
1366                         spin_unlock_irq(&mapping->tree_lock);
1367                         /* swap in or instantiate fallocated page */
1368                         if (shmem_getpage(mapping->host, index, &page,
1369                                                 SGP_NOHUGE)) {
1370                                 result = SCAN_FAIL;
1371                                 goto tree_unlocked;
1372                         }
1373                         spin_lock_irq(&mapping->tree_lock);
1374                 } else if (trylock_page(page)) {
1375                         get_page(page);
1376                 } else {
1377                         result = SCAN_PAGE_LOCK;
1378                         break;
1379                 }
1380
1381                 /*
1382                  * The page must be locked, so we can drop the tree_lock
1383                  * without racing with truncate.
1384                  */
1385                 VM_BUG_ON_PAGE(!PageLocked(page), page);
1386                 VM_BUG_ON_PAGE(!PageUptodate(page), page);
1387                 VM_BUG_ON_PAGE(PageTransCompound(page), page);
1388
1389                 if (page_mapping(page) != mapping) {
1390                         result = SCAN_TRUNCATED;
1391                         goto out_unlock;
1392                 }
1393                 spin_unlock_irq(&mapping->tree_lock);
1394
1395                 if (isolate_lru_page(page)) {
1396                         result = SCAN_DEL_PAGE_LRU;
1397                         goto out_isolate_failed;
1398                 }
1399
1400                 if (page_mapped(page))
1401                         unmap_mapping_range(mapping, index << PAGE_SHIFT,
1402                                         PAGE_SIZE, 0);
1403
1404                 spin_lock_irq(&mapping->tree_lock);
1405
1406                 slot = radix_tree_lookup_slot(&mapping->page_tree, index);
1407                 VM_BUG_ON_PAGE(page != radix_tree_deref_slot_protected(slot,
1408                                         &mapping->tree_lock), page);
1409                 VM_BUG_ON_PAGE(page_mapped(page), page);
1410
1411                 /*
1412                  * The page is expected to have page_count() == 3:
1413                  *  - we hold a pin on it;
1414                  *  - one reference from radix tree;
1415                  *  - one from isolate_lru_page;
1416                  */
1417                 if (!page_ref_freeze(page, 3)) {
1418                         result = SCAN_PAGE_COUNT;
1419                         goto out_lru;
1420                 }
1421
1422                 /*
1423                  * Add the page to the list to be able to undo the collapse if
1424                  * something goes wrong.
1425                  */
1426                 list_add_tail(&page->lru, &pagelist);
1427
1428                 /* Finally, replace with the new page. */
1429                 radix_tree_replace_slot(&mapping->page_tree, slot,
1430                                 new_page + (index % HPAGE_PMD_NR));
1431
1432                 slot = radix_tree_iter_resume(slot, &iter);
1433                 index++;
1434                 continue;
1435 out_lru:
1436                 spin_unlock_irq(&mapping->tree_lock);
1437                 putback_lru_page(page);
1438 out_isolate_failed:
1439                 unlock_page(page);
1440                 put_page(page);
1441                 goto tree_unlocked;
1442 out_unlock:
1443                 unlock_page(page);
1444                 put_page(page);
1445                 break;
1446         }
1447
1448         /*
1449          * Handle a hole in the radix tree at the end of the range.
1450          * This code only triggers if there's nothing in the radix tree
1451          * beyond 'end'.
1452          */
1453         if (result == SCAN_SUCCEED && index < end) {
1454                 int n = end - index;
1455
1456                 if (!shmem_charge(mapping->host, n)) {
1457                         result = SCAN_FAIL;
1458                         goto tree_locked;
1459                 }
1460
1461                 for (; index < end; index++) {
1462                         radix_tree_insert(&mapping->page_tree, index,
1463                                         new_page + (index % HPAGE_PMD_NR));
1464                 }
1465                 nr_none += n;
1466         }
1467
1468 tree_locked:
1469         spin_unlock_irq(&mapping->tree_lock);
1470 tree_unlocked:
1471
1472         if (result == SCAN_SUCCEED) {
1473                 unsigned long flags;
1474                 struct zone *zone = page_zone(new_page);
1475
1476                 /*
1477                  * Replacing the old pages with the new one has succeeded; now we
1478                  * need to copy the content and free the old pages.
1479                  */
1480                 list_for_each_entry_safe(page, tmp, &pagelist, lru) {
1481                         copy_highpage(new_page + (page->index % HPAGE_PMD_NR),
1482                                         page);
1483                         list_del(&page->lru);
1484                         unlock_page(page);
1485                         page_ref_unfreeze(page, 1);
1486                         page->mapping = NULL;
1487                         ClearPageActive(page);
1488                         ClearPageUnevictable(page);
1489                         put_page(page);
1490                 }
1491
1492                 local_irq_save(flags);
1493                 __inc_node_page_state(new_page, NR_SHMEM_THPS);
1494                 if (nr_none) {
1495                         __mod_node_page_state(zone->zone_pgdat, NR_FILE_PAGES, nr_none);
1496                         __mod_node_page_state(zone->zone_pgdat, NR_SHMEM, nr_none);
1497                 }
1498                 local_irq_restore(flags);
1499
1500                 /*
1501                  * Remove the pte page tables, so we can re-fault
1502                  * the page as huge.
1503                  */
1504                 retract_page_tables(mapping, start);
1505
1506                 /* Everything is ready, let's unfreeze the new_page */
1507                 set_page_dirty(new_page);
1508                 SetPageUptodate(new_page);
1509                 page_ref_unfreeze(new_page, HPAGE_PMD_NR);
1510                 mem_cgroup_commit_charge(new_page, memcg, false, true);
1511                 lru_cache_add_anon(new_page);
1512                 unlock_page(new_page);
1513
1514                 *hpage = NULL;
1515         } else {
1516                 /* Something went wrong: roll back the changes to the radix tree */
1517                 shmem_uncharge(mapping->host, nr_none);
1518                 spin_lock_irq(&mapping->tree_lock);
1519                 radix_tree_for_each_slot(slot, &mapping->page_tree, &iter,
1520                                 start) {
1521                         if (iter.index >= end)
1522                                 break;
1523                         page = list_first_entry_or_null(&pagelist,
1524                                         struct page, lru);
1525                         if (!page || iter.index < page->index) {
1526                                 if (!nr_none)
1527                                         break;
1528                                 nr_none--;
1529                                 /* Put holes back where they were */
1530                                 radix_tree_delete(&mapping->page_tree,
1531                                                   iter.index);
1532                                 continue;
1533                         }
1534
1535                         VM_BUG_ON_PAGE(page->index != iter.index, page);
1536
1537                         /* Unfreeze the page. */
1538                         list_del(&page->lru);
1539                         page_ref_unfreeze(page, 2);
1540                         radix_tree_replace_slot(&mapping->page_tree,
1541                                                 slot, page);
1542                         slot = radix_tree_iter_resume(slot, &iter);
1543                         spin_unlock_irq(&mapping->tree_lock);
1544                         putback_lru_page(page);
1545                         unlock_page(page);
1546                         spin_lock_irq(&mapping->tree_lock);
1547                 }
1548                 VM_BUG_ON(nr_none);
1549                 spin_unlock_irq(&mapping->tree_lock);
1550
1551                 /* Unfreeze new_page; the caller will take care of freeing it */
1552                 page_ref_unfreeze(new_page, 1);
1553                 mem_cgroup_cancel_charge(new_page, memcg, true);
1554                 unlock_page(new_page);
1555                 new_page->mapping = NULL;
1556         }
1557 out:
1558         VM_BUG_ON(!list_empty(&pagelist));
1559         /* TODO: tracepoints */
1560 }
1561
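/*
 * khugepaged_scan_shmem() - scan one PMD-sized window of a shmem mapping.
 *
 * Walks the HPAGE_PMD_NR page-cache slots starting at @start under RCU and
 * gives up if collapsing looks impossible or unprofitable: too many
 * swapped-out entries, an already-compound page, a page off the LRU or with
 * extra references, or a node rejected by khugepaged_scan_abort().  If
 * enough pages are present, a target node is chosen and collapse_shmem()
 * does the actual work.
 */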
1562 static void khugepaged_scan_shmem(struct mm_struct *mm,
1563                 struct address_space *mapping,
1564                 pgoff_t start, struct page **hpage)
1565 {
1566         struct page *page = NULL;
1567         struct radix_tree_iter iter;
1568         void **slot;
1569         int present, swap;
1570         int node = NUMA_NO_NODE;
1571         int result = SCAN_SUCCEED;
1572
1573         present = 0;
1574         swap = 0;
1575         memset(khugepaged_node_load, 0, sizeof(khugepaged_node_load));
1576         rcu_read_lock();
1577         radix_tree_for_each_slot(slot, &mapping->page_tree, &iter, start) {
1578                 if (iter.index >= start + HPAGE_PMD_NR)
1579                         break;
1580
1581                 page = radix_tree_deref_slot(slot);
1582                 if (radix_tree_deref_retry(page)) {
1583                         slot = radix_tree_iter_retry(&iter);
1584                         continue;
1585                 }
1586
1587                 if (radix_tree_exception(page)) {
1588                         if (++swap > khugepaged_max_ptes_swap) {
1589                                 result = SCAN_EXCEED_SWAP_PTE;
1590                                 break;
1591                         }
1592                         continue;
1593                 }
1594
1595                 if (PageTransCompound(page)) {
1596                         result = SCAN_PAGE_COMPOUND;
1597                         break;
1598                 }
1599
1600                 node = page_to_nid(page);
1601                 if (khugepaged_scan_abort(node)) {
1602                         result = SCAN_SCAN_ABORT;
1603                         break;
1604                 }
1605                 khugepaged_node_load[node]++;
1606
1607                 if (!PageLRU(page)) {
1608                         result = SCAN_PAGE_LRU;
1609                         break;
1610                 }
1611
1612                 if (page_count(page) != 1 + page_mapcount(page)) {
1613                         result = SCAN_PAGE_COUNT;
1614                         break;
1615                 }
1616
1617                 /*
1618                  * We probably should check if the page is referenced here, but
1619                  * nobody would transfer pte_young() to PageReferenced() for us.
1620                  * And an rmap walk here is just too costly...
1621                  */
1622
1623                 present++;
1624
1625                 if (need_resched()) {
1626                         slot = radix_tree_iter_resume(slot, &iter);
1627                         cond_resched_rcu();
1628                 }
1629         }
1630         rcu_read_unlock();
1631
1632         if (result == SCAN_SUCCEED) {
1633                 if (present < HPAGE_PMD_NR - khugepaged_max_ptes_none) {
1634                         result = SCAN_EXCEED_NONE_PTE;
1635                 } else {
1636                         node = khugepaged_find_target_node();
1637                         collapse_shmem(mm, mapping, start, hpage, node);
1638                 }
1639         }
1640
1641         /* TODO: tracepoints */
1642 }
1643 #else
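/*
 * Stub for kernels built without shmem support: shmem_file() is then a
 * compile-time false, so the call in khugepaged_scan_mm_slot() is optimized
 * out and this function must never be emitted; BUILD_BUG() enforces that.
 */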
1644 static void khugepaged_scan_shmem(struct mm_struct *mm,
1645                 struct address_space *mapping,
1646                 pgoff_t start, struct page **hpage)
1647 {
1648         BUILD_BUG();
1649 }
1650 #endif
1651
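/*
 * khugepaged_scan_mm_slot() - make up to @pages ptes worth of scan progress.
 *
 * Entered and exited with khugepaged_mm_lock held, but the lock is dropped
 * for the actual scan.  Resumes at the (mm_slot, address) recorded in
 * khugepaged_scan, walks eligible vmas in HPAGE_PMD_SIZE steps, and hands
 * each step to khugepaged_scan_pmd(), or to khugepaged_scan_shmem() for
 * shmem mappings.  When the current mm is exiting or has been fully
 * scanned, the cursor advances to the next mm on the list (wrapping to the
 * head counts as a full scan).  Returns the progress made, measured in ptes.
 */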
1652 static unsigned int khugepaged_scan_mm_slot(unsigned int pages,
1653                                             struct page **hpage)
1654         __releases(&khugepaged_mm_lock)
1655         __acquires(&khugepaged_mm_lock)
1656 {
1657         struct mm_slot *mm_slot;
1658         struct mm_struct *mm;
1659         struct vm_area_struct *vma;
1660         int progress = 0;
1661
1662         VM_BUG_ON(!pages);
1663         VM_BUG_ON(NR_CPUS != 1 && !spin_is_locked(&khugepaged_mm_lock));
1664
1665         if (khugepaged_scan.mm_slot)
1666                 mm_slot = khugepaged_scan.mm_slot;
1667         else {
1668                 mm_slot = list_entry(khugepaged_scan.mm_head.next,
1669                                      struct mm_slot, mm_node);
1670                 khugepaged_scan.address = 0;
1671                 khugepaged_scan.mm_slot = mm_slot;
1672         }
1673         spin_unlock(&khugepaged_mm_lock);
1674
1675         mm = mm_slot->mm;
1676         down_read(&mm->mmap_sem);
1677         if (unlikely(khugepaged_test_exit(mm)))
1678                 vma = NULL;
1679         else
1680                 vma = find_vma(mm, khugepaged_scan.address);
1681
1682         progress++;
1683         for (; vma; vma = vma->vm_next) {
1684                 unsigned long hstart, hend;
1685
1686                 cond_resched();
1687                 if (unlikely(khugepaged_test_exit(mm))) {
1688                         progress++;
1689                         break;
1690                 }
1691                 if (!hugepage_vma_check(vma)) {
1692 skip:
1693                         progress++;
1694                         continue;
1695                 }
1696                 hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
1697                 hend = vma->vm_end & HPAGE_PMD_MASK;
1698                 if (hstart >= hend)
1699                         goto skip;
1700                 if (khugepaged_scan.address > hend)
1701                         goto skip;
1702                 if (khugepaged_scan.address < hstart)
1703                         khugepaged_scan.address = hstart;
1704                 VM_BUG_ON(khugepaged_scan.address & ~HPAGE_PMD_MASK);
1705
1706                 while (khugepaged_scan.address < hend) {
1707                         int ret;
1708                         cond_resched();
1709                         if (unlikely(khugepaged_test_exit(mm)))
1710                                 goto breakouterloop;
1711
1712                         VM_BUG_ON(khugepaged_scan.address < hstart ||
1713                                   khugepaged_scan.address + HPAGE_PMD_SIZE >
1714                                   hend);
1715                         if (shmem_file(vma->vm_file)) {
1716                                 struct file *file;
1717                                 pgoff_t pgoff = linear_page_index(vma,
1718                                                 khugepaged_scan.address);
1719                                 if (!shmem_huge_enabled(vma))
1720                                         goto skip;
1721                                 file = get_file(vma->vm_file);
1722                                 up_read(&mm->mmap_sem);
1723                                 ret = 1;
1724                                 khugepaged_scan_shmem(mm, file->f_mapping,
1725                                                 pgoff, hpage);
1726                                 fput(file);
1727                         } else {
1728                                 ret = khugepaged_scan_pmd(mm, vma,
1729                                                 khugepaged_scan.address,
1730                                                 hpage);
1731                         }
1732                         /* move to next address */
1733                         khugepaged_scan.address += HPAGE_PMD_SIZE;
1734                         progress += HPAGE_PMD_NR;
1735                         if (ret)
1736                                 /* we released mmap_sem so break loop */
1737                                 goto breakouterloop_mmap_sem;
1738                         if (progress >= pages)
1739                                 goto breakouterloop;
1740                 }
1741         }
1742 breakouterloop:
1743         up_read(&mm->mmap_sem); /* exit_mmap will destroy ptes after this */
1744 breakouterloop_mmap_sem:
1745
1746         spin_lock(&khugepaged_mm_lock);
1747         VM_BUG_ON(khugepaged_scan.mm_slot != mm_slot);
1748         /*
1749          * Release the current mm_slot if this mm is about to die, or
1750          * if we scanned all vmas of this mm.
1751          */
1752         if (khugepaged_test_exit(mm) || !vma) {
1753                 /*
1754                  * Make sure that if mm_users is reaching zero while
1755                  * khugepaged runs here, khugepaged_exit will find
1756                  * mm_slot not pointing to the exiting mm.
1757                  */
1758                 if (mm_slot->mm_node.next != &khugepaged_scan.mm_head) {
1759                         khugepaged_scan.mm_slot = list_entry(
1760                                 mm_slot->mm_node.next,
1761                                 struct mm_slot, mm_node);
1762                         khugepaged_scan.address = 0;
1763                 } else {
1764                         khugepaged_scan.mm_slot = NULL;
1765                         khugepaged_full_scans++;
1766                 }
1767
1768                 collect_mm_slot(mm_slot);
1769         }
1770
1771         return progress;
1772 }
1773
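/* There is work to do when at least one mm is queued and khugepaged is enabled. */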
1774 static int khugepaged_has_work(void)
1775 {
1776         return !list_empty(&khugepaged_scan.mm_head) &&
1777                 khugepaged_enabled();
1778 }
1779
1780 static int khugepaged_wait_event(void)
1781 {
1782         return !list_empty(&khugepaged_scan.mm_head) ||
1783                 kthread_should_stop();
1784 }
1785
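/*
 * One scan pass of the khugepaged thread: keep calling
 * khugepaged_scan_mm_slot() until khugepaged_pages_to_scan ptes have been
 * covered, the thread is told to stop or freeze, or huge page preallocation
 * fails.  "pass_through_head < 2" limits a pass to a single trip over the
 * mm list.
 */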
1786 static void khugepaged_do_scan(void)
1787 {
1788         struct page *hpage = NULL;
1789         unsigned int progress = 0, pass_through_head = 0;
1790         unsigned int pages = khugepaged_pages_to_scan;
1791         bool wait = true;
1792
1793         barrier(); /* write khugepaged_pages_to_scan to local stack */
1794
1795         while (progress < pages) {
1796                 if (!khugepaged_prealloc_page(&hpage, &wait))
1797                         break;
1798
1799                 cond_resched();
1800
1801                 if (unlikely(kthread_should_stop() || try_to_freeze()))
1802                         break;
1803
1804                 spin_lock(&khugepaged_mm_lock);
1805                 if (!khugepaged_scan.mm_slot)
1806                         pass_through_head++;
1807                 if (khugepaged_has_work() &&
1808                     pass_through_head < 2)
1809                         progress += khugepaged_scan_mm_slot(pages - progress,
1810                                                             &hpage);
1811                 else
1812                         progress = pages;
1813                 spin_unlock(&khugepaged_mm_lock);
1814         }
1815
1816         if (!IS_ERR_OR_NULL(hpage))
1817                 put_page(hpage);
1818 }
1819
1820 static bool khugepaged_should_wakeup(void)
1821 {
1822         return kthread_should_stop() ||
1823                time_after_eq(jiffies, khugepaged_sleep_expire);
1824 }
1825
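/*
 * Sleep between scan passes: take a khugepaged_scan_sleep_millisecs nap
 * when there is pending work (cut short by kthread_stop()), otherwise block
 * until an mm is registered for scanning or the thread should stop.
 */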
1826 static void khugepaged_wait_work(void)
1827 {
1828         if (khugepaged_has_work()) {
1829                 const unsigned long scan_sleep_jiffies =
1830                         msecs_to_jiffies(khugepaged_scan_sleep_millisecs);
1831
1832                 if (!scan_sleep_jiffies)
1833                         return;
1834
1835                 khugepaged_sleep_expire = jiffies + scan_sleep_jiffies;
1836                 wait_event_freezable_timeout(khugepaged_wait,
1837                                              khugepaged_should_wakeup(),
1838                                              scan_sleep_jiffies);
1839                 return;
1840         }
1841
1842         if (khugepaged_enabled())
1843                 wait_event_freezable(khugepaged_wait, khugepaged_wait_event());
1844 }
1845
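/*
 * The khugepaged kernel thread: alternate scan passes with sleeps until
 * kthread_stop(), then release the mm_slot the scan cursor still points at
 * before exiting.
 */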
1846 static int khugepaged(void *none)
1847 {
1848         struct mm_slot *mm_slot;
1849
1850         set_freezable();
1851         set_user_nice(current, MAX_NICE);
1852
1853         while (!kthread_should_stop()) {
1854                 khugepaged_do_scan();
1855                 khugepaged_wait_work();
1856         }
1857
1858         spin_lock(&khugepaged_mm_lock);
1859         mm_slot = khugepaged_scan.mm_slot;
1860         khugepaged_scan.mm_slot = NULL;
1861         if (mm_slot)
1862                 collect_mm_slot(mm_slot);
1863         spin_unlock(&khugepaged_mm_lock);
1864         return 0;
1865 }
1866
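/*
 * Raise min_free_kbytes so that enough pageblocks stay free for the
 * anti-fragmentation heuristics to work, keeping huge page allocations
 * viable.  The value is only ever raised here, never lowered, and the
 * per-zone watermarks are recomputed afterwards.
 */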
1867 static void set_recommended_min_free_kbytes(void)
1868 {
1869         struct zone *zone;
1870         int nr_zones = 0;
1871         unsigned long recommended_min;
1872
1873         for_each_populated_zone(zone)
1874                 nr_zones++;
1875
1876         /* Ensure 2 pageblocks are free to assist fragmentation avoidance */
1877         recommended_min = pageblock_nr_pages * nr_zones * 2;
1878
1879         /*
1880          * Make sure that on average at least two pageblocks are almost free
1881          * of another type, one for a migratetype to fall back to and a
1882          * second to avoid subsequent fallbacks of other types.  There are 3
1883          * MIGRATE_TYPES we care about.
1884          */
1885         recommended_min += pageblock_nr_pages * nr_zones *
1886                            MIGRATE_PCPTYPES * MIGRATE_PCPTYPES;
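        /*
         * Rough example (assuming 4KiB pages and 2MiB pageblocks, i.e.
         * pageblock_nr_pages == 512, with MIGRATE_PCPTYPES == 3): each
         * populated zone contributes 512 * 2 + 512 * 3 * 3 = 5632 pages,
         * about 22MiB after the conversion to kilobytes below, subject to
         * the 5%-of-lowmem clamp.
         */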
1887
1888         /* don't ever allow reserving more than 5% of the lowmem */
1889         recommended_min = min(recommended_min,
1890                               (unsigned long) nr_free_buffer_pages() / 20);
1891         recommended_min <<= (PAGE_SHIFT-10);
1892
1893         if (recommended_min > min_free_kbytes) {
1894                 if (user_min_free_kbytes >= 0)
1895                         pr_info("raising min_free_kbytes from %d to %lu to help transparent hugepage allocations\n",
1896                                 min_free_kbytes, recommended_min);
1897
1898                 min_free_kbytes = recommended_min;
1899         }
1900         setup_per_zone_wmarks();
1901 }
1902
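/*
 * Start or stop the khugepaged thread to match khugepaged_enabled().
 * Serialized by khugepaged_mutex; returns 0 on success or the kthread_run()
 * error when the thread could not be started.
 */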
1903 int start_stop_khugepaged(void)
1904 {
1905         static struct task_struct *khugepaged_thread __read_mostly;
1906         static DEFINE_MUTEX(khugepaged_mutex);
1907         int err = 0;
1908
1909         mutex_lock(&khugepaged_mutex);
1910         if (khugepaged_enabled()) {
1911                 if (!khugepaged_thread)
1912                         khugepaged_thread = kthread_run(khugepaged, NULL,
1913                                                         "khugepaged");
1914                 if (IS_ERR(khugepaged_thread)) {
1915                         pr_err("khugepaged: kthread_run(khugepaged) failed\n");
1916                         err = PTR_ERR(khugepaged_thread);
1917                         khugepaged_thread = NULL;
1918                         goto fail;
1919                 }
1920
1921                 if (!list_empty(&khugepaged_scan.mm_head))
1922                         wake_up_interruptible(&khugepaged_wait);
1923
1924                 set_recommended_min_free_kbytes();
1925         } else if (khugepaged_thread) {
1926                 kthread_stop(khugepaged_thread);
1927                 khugepaged_thread = NULL;
1928         }
1929 fail:
1930         mutex_unlock(&khugepaged_mutex);
1931         return err;
1932 }