BACKPORT: mm: multi-gen LRU: optimize multiple memcgs
mm/vmscan.c  (platform/kernel/linux-rpi.git)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  *  linux/mm/vmscan.c
4  *
5  *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
6  *
7  *  Swap reorganised 29.12.95, Stephen Tweedie.
8  *  kswapd added: 7.1.96  sct
9  *  Removed kswapd_ctl limits, and swap out as many pages as needed
10  *  to bring the system back to freepages.high: 2.4.97, Rik van Riel.
11  *  Zone aware kswapd started 02/00, Kanoj Sarcar (kanoj@sgi.com).
12  *  Multiqueue VM started 5.8.00, Rik van Riel.
13  */
14
15 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
16
17 #include <linux/mm.h>
18 #include <linux/sched/mm.h>
19 #include <linux/module.h>
20 #include <linux/gfp.h>
21 #include <linux/kernel_stat.h>
22 #include <linux/swap.h>
23 #include <linux/pagemap.h>
24 #include <linux/init.h>
25 #include <linux/highmem.h>
26 #include <linux/vmpressure.h>
27 #include <linux/vmstat.h>
28 #include <linux/file.h>
29 #include <linux/writeback.h>
30 #include <linux/blkdev.h>
31 #include <linux/buffer_head.h>  /* for try_to_release_page(),
32                                         buffer_heads_over_limit */
33 #include <linux/mm_inline.h>
34 #include <linux/backing-dev.h>
35 #include <linux/rmap.h>
36 #include <linux/topology.h>
37 #include <linux/cpu.h>
38 #include <linux/cpuset.h>
39 #include <linux/compaction.h>
40 #include <linux/notifier.h>
41 #include <linux/rwsem.h>
42 #include <linux/delay.h>
43 #include <linux/kthread.h>
44 #include <linux/freezer.h>
45 #include <linux/memcontrol.h>
46 #include <linux/delayacct.h>
47 #include <linux/sysctl.h>
48 #include <linux/oom.h>
49 #include <linux/pagevec.h>
50 #include <linux/prefetch.h>
51 #include <linux/printk.h>
52 #include <linux/dax.h>
53 #include <linux/psi.h>
54 #include <linux/pagewalk.h>
55 #include <linux/shmem_fs.h>
56
57 #include <asm/tlbflush.h>
58 #include <asm/div64.h>
59
60 #include <linux/swapops.h>
61 #include <linux/balloon_compaction.h>
62
63 #include "internal.h"
64
65 #define CREATE_TRACE_POINTS
66 #include <trace/events/vmscan.h>
67
68 struct scan_control {
69         /* How many pages shrink_list() should reclaim */
70         unsigned long nr_to_reclaim;
71
72         /*
73          * Nodemask of nodes allowed by the caller. If NULL, all nodes
74          * are scanned.
75          */
76         nodemask_t      *nodemask;
77
78         /*
79          * The memory cgroup that hit its limit and as a result is the
80          * primary target of this reclaim invocation.
81          */
82         struct mem_cgroup *target_mem_cgroup;
83
84         /* Can active pages be deactivated as part of reclaim? */
85 #define DEACTIVATE_ANON 1
86 #define DEACTIVATE_FILE 2
87         unsigned int may_deactivate:2;
88         unsigned int force_deactivate:1;
89         unsigned int skipped_deactivate:1;
90
91         /* Writepage batching in laptop mode; RECLAIM_WRITE */
92         unsigned int may_writepage:1;
93
94         /* Can mapped pages be reclaimed? */
95         unsigned int may_unmap:1;
96
97         /* Can pages be swapped as part of reclaim? */
98         unsigned int may_swap:1;
99
100         /*
101          * Cgroups are not reclaimed below their configured memory.low,
102          * unless we threaten to OOM. If any cgroups are skipped due to
103          * memory.low and nothing was reclaimed, go back for memory.low.
104          */
105         unsigned int memcg_low_reclaim:1;
106         unsigned int memcg_low_skipped:1;
107
108         unsigned int hibernation_mode:1;
109
110         /* One of the zones is ready for compaction */
111         unsigned int compaction_ready:1;
112
113         /* There is easily reclaimable cold cache in the current node */
114         unsigned int cache_trim_mode:1;
115
116         /* The file pages on the current node are dangerously low */
117         unsigned int file_is_tiny:1;
118
119 #ifdef CONFIG_LRU_GEN
120         /* help kswapd make better choices among multiple memcgs */
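        /*
         * Roughly, per the "multi-gen LRU: optimize multiple memcgs" series
         * this hunk is backported from: the eviction path clears
         * memcgs_need_aging once eviction alone has made enough progress,
         * letting kswapd optimistically skip the costlier aging step on its
         * next pass; last_reclaimed snapshots sc->nr_reclaimed when kswapd
         * starts aging so later checks can tell how much has been reclaimed
         * since then.
         */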
121         unsigned int memcgs_need_aging:1;
122         unsigned long last_reclaimed;
123 #endif
124
125         /* Allocation order */
126         s8 order;
127
128         /* Scan (total_size >> priority) pages at once */
129         s8 priority;
130
131         /* The highest zone to isolate pages for reclaim from */
132         s8 reclaim_idx;
133
134         /* This context's GFP mask */
135         gfp_t gfp_mask;
136
137         /* Incremented by the number of inactive pages that were scanned */
138         unsigned long nr_scanned;
139
140         /* Number of pages freed so far during a call to shrink_zones() */
141         unsigned long nr_reclaimed;
142
143         struct {
144                 unsigned int dirty;
145                 unsigned int unqueued_dirty;
146                 unsigned int congested;
147                 unsigned int writeback;
148                 unsigned int immediate;
149                 unsigned int file_taken;
150                 unsigned int taken;
151         } nr;
152
153         /* for recording the reclaimed slab by now */
154         struct reclaim_state reclaim_state;
155 };
156
157 #ifdef ARCH_HAS_PREFETCH
158 #define prefetch_prev_lru_page(_page, _base, _field)                    \
159         do {                                                            \
160                 if ((_page)->lru.prev != _base) {                       \
161                         struct page *prev;                              \
162                                                                         \
163                         prev = lru_to_page(&(_page->lru));              \
164                         prefetch(&prev->_field);                        \
165                 }                                                       \
166         } while (0)
167 #else
168 #define prefetch_prev_lru_page(_page, _base, _field) do { } while (0)
169 #endif
170
171 #ifdef ARCH_HAS_PREFETCHW
172 #define prefetchw_prev_lru_page(_page, _base, _field)                   \
173         do {                                                            \
174                 if ((_page)->lru.prev != _base) {                       \
175                         struct page *prev;                              \
176                                                                         \
177                         prev = lru_to_page(&(_page->lru));              \
178                         prefetchw(&prev->_field);                       \
179                 }                                                       \
180         } while (0)
181 #else
182 #define prefetchw_prev_lru_page(_page, _base, _field) do { } while (0)
183 #endif
184
185 /*
186  * From 0 .. 100.  Higher means more swappy.
187  */
188 int vm_swappiness = 60;
189 /*
190  * The total number of pages which are beyond the high watermark within all
191  * zones.
192  */
193 unsigned long vm_total_pages;
194
195 static void set_task_reclaim_state(struct task_struct *task,
196                                    struct reclaim_state *rs)
197 {
198         /* Check for an overwrite */
199         WARN_ON_ONCE(rs && task->reclaim_state);
200
201         /* Check for the nulling of an already-nulled member */
202         WARN_ON_ONCE(!rs && !task->reclaim_state);
203
204         task->reclaim_state = rs;
205 }
206
207 static LIST_HEAD(shrinker_list);
208 static DECLARE_RWSEM(shrinker_rwsem);
209
210 #ifdef CONFIG_MEMCG
211 /*
212  * We allow subsystems to populate their shrinker-related
213  * LRU lists before register_shrinker_prepared() is called
214  * for the shrinker, since we don't want to impose
215  * restrictions on their internal registration order.
216  * In this case shrink_slab_memcg() may find the corresponding
217  * bit set in the shrinker map.
218  *
219  * This value is used by the function to detect registering
220  * shrinkers and to skip do_shrink_slab() calls for them.
221  */
222 #define SHRINKER_REGISTERING ((struct shrinker *)~0UL)
223
224 static DEFINE_IDR(shrinker_idr);
225 static int shrinker_nr_max;
226
227 static int prealloc_memcg_shrinker(struct shrinker *shrinker)
228 {
229         int id, ret = -ENOMEM;
230
231         down_write(&shrinker_rwsem);
232         /* This may call shrinker, so it must use down_read_trylock() */
233         id = idr_alloc(&shrinker_idr, SHRINKER_REGISTERING, 0, 0, GFP_KERNEL);
234         if (id < 0)
235                 goto unlock;
236
237         if (id >= shrinker_nr_max) {
238                 if (memcg_expand_shrinker_maps(id)) {
239                         idr_remove(&shrinker_idr, id);
240                         goto unlock;
241                 }
242
243                 shrinker_nr_max = id + 1;
244         }
245         shrinker->id = id;
246         ret = 0;
247 unlock:
248         up_write(&shrinker_rwsem);
249         return ret;
250 }
251
252 static void unregister_memcg_shrinker(struct shrinker *shrinker)
253 {
254         int id = shrinker->id;
255
256         BUG_ON(id < 0);
257
258         down_write(&shrinker_rwsem);
259         idr_remove(&shrinker_idr, id);
260         up_write(&shrinker_rwsem);
261 }
262
263 static bool cgroup_reclaim(struct scan_control *sc)
264 {
265         return sc->target_mem_cgroup;
266 }
267
268 /**
269  * writeback_throttling_sane - is the usual dirty throttling mechanism available?
270  * @sc: scan_control in question
271  *
272  * The normal page dirty throttling mechanism in balance_dirty_pages() is
273  * completely broken with the legacy memcg and direct stalling in
274  * shrink_page_list() is used for throttling instead, which lacks all the
275  * niceties such as fairness, adaptive pausing, bandwidth proportional
276  * allocation and configurability.
277  *
278  * This function tests whether the vmscan currently in progress can assume
279  * that the normal dirty throttling mechanism is operational.
280  */
281 static bool writeback_throttling_sane(struct scan_control *sc)
282 {
283         if (!cgroup_reclaim(sc))
284                 return true;
285 #ifdef CONFIG_CGROUP_WRITEBACK
286         if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
287                 return true;
288 #endif
289         return false;
290 }
291 #else
292 static int prealloc_memcg_shrinker(struct shrinker *shrinker)
293 {
294         return 0;
295 }
296
297 static void unregister_memcg_shrinker(struct shrinker *shrinker)
298 {
299 }
300
301 static bool cgroup_reclaim(struct scan_control *sc)
302 {
303         return false;
304 }
305
306 static bool writeback_throttling_sane(struct scan_control *sc)
307 {
308         return true;
309 }
310 #endif
311
312 /*
313  * This misses isolated pages, which are not accounted for in order to save counters.
314  * As the data only determines if reclaim or compaction continues, it is
315  * not expected that isolated pages will be a dominating factor.
316  */
317 unsigned long zone_reclaimable_pages(struct zone *zone)
318 {
319         unsigned long nr;
320
321         nr = zone_page_state_snapshot(zone, NR_ZONE_INACTIVE_FILE) +
322                 zone_page_state_snapshot(zone, NR_ZONE_ACTIVE_FILE);
323         if (get_nr_swap_pages() > 0)
324                 nr += zone_page_state_snapshot(zone, NR_ZONE_INACTIVE_ANON) +
325                         zone_page_state_snapshot(zone, NR_ZONE_ACTIVE_ANON);
326
327         return nr;
328 }
329
330 /**
331  * lruvec_lru_size -  Returns the number of pages on the given LRU list.
332  * @lruvec: lru vector
333  * @lru: lru to use
334  * @zone_idx: zones to consider (use MAX_NR_ZONES for the whole LRU list)
335  */
336 unsigned long lruvec_lru_size(struct lruvec *lruvec, enum lru_list lru, int zone_idx)
337 {
338         unsigned long size = 0;
339         int zid;
340
341         for (zid = 0; zid <= zone_idx && zid < MAX_NR_ZONES; zid++) {
342                 struct zone *zone = &lruvec_pgdat(lruvec)->node_zones[zid];
343
344                 if (!managed_zone(zone))
345                         continue;
346
347                 if (!mem_cgroup_disabled())
348                         size += mem_cgroup_get_zone_lru_size(lruvec, lru, zid);
349                 else
350                         size += zone_page_state(zone, NR_ZONE_LRU_BASE + lru);
351         }
352         return size;
353 }
354
355 /*
356  * Add a shrinker callback to be called from the vm.
357  */
358 int prealloc_shrinker(struct shrinker *shrinker)
359 {
360         unsigned int size = sizeof(*shrinker->nr_deferred);
361
362         if (shrinker->flags & SHRINKER_NUMA_AWARE)
363                 size *= nr_node_ids;
364
365         shrinker->nr_deferred = kzalloc(size, GFP_KERNEL);
366         if (!shrinker->nr_deferred)
367                 return -ENOMEM;
368
369         if (shrinker->flags & SHRINKER_MEMCG_AWARE) {
370                 if (prealloc_memcg_shrinker(shrinker))
371                         goto free_deferred;
372         }
373
374         return 0;
375
376 free_deferred:
377         kfree(shrinker->nr_deferred);
378         shrinker->nr_deferred = NULL;
379         return -ENOMEM;
380 }
381
382 void free_prealloced_shrinker(struct shrinker *shrinker)
383 {
384         if (!shrinker->nr_deferred)
385                 return;
386
387         if (shrinker->flags & SHRINKER_MEMCG_AWARE)
388                 unregister_memcg_shrinker(shrinker);
389
390         kfree(shrinker->nr_deferred);
391         shrinker->nr_deferred = NULL;
392 }
393
394 void register_shrinker_prepared(struct shrinker *shrinker)
395 {
396         down_write(&shrinker_rwsem);
397         list_add_tail(&shrinker->list, &shrinker_list);
398 #ifdef CONFIG_MEMCG
399         if (shrinker->flags & SHRINKER_MEMCG_AWARE)
400                 idr_replace(&shrinker_idr, shrinker, shrinker->id);
401 #endif
402         up_write(&shrinker_rwsem);
403 }
404
405 int register_shrinker(struct shrinker *shrinker)
406 {
407         int err = prealloc_shrinker(shrinker);
408
409         if (err)
410                 return err;
411         register_shrinker_prepared(shrinker);
412         return 0;
413 }
414 EXPORT_SYMBOL(register_shrinker);
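
/*
 * Illustrative sketch of a caller of the registration API above.  This is
 * hypothetical client code, not part of this file; the "demo_*" names and
 * object counter are made up.
 *
 *	static unsigned long demo_count(struct shrinker *s,
 *					struct shrink_control *sc)
 *	{
 *		return demo_nr_objects;
 *	}
 *
 *	static unsigned long demo_scan(struct shrinker *s,
 *				       struct shrink_control *sc)
 *	{
 *		return demo_free_objects(sc->nr_to_scan);
 *	}
 *
 *	static struct shrinker demo_shrinker = {
 *		.count_objects	= demo_count,
 *		.scan_objects	= demo_scan,
 *		.seeks		= DEFAULT_SEEKS,
 *	};
 *
 *	register_shrinker(&demo_shrinker);
 *
 * count_objects() returns the number of freeable objects (0 when there is
 * nothing to do) and scan_objects() returns the number actually freed, or
 * SHRINK_STOP to bail out; registration pairs with unregister_shrinker().
 */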
415
416 /*
417  * Remove one
418  */
419 void unregister_shrinker(struct shrinker *shrinker)
420 {
421         if (!shrinker->nr_deferred)
422                 return;
423         if (shrinker->flags & SHRINKER_MEMCG_AWARE)
424                 unregister_memcg_shrinker(shrinker);
425         down_write(&shrinker_rwsem);
426         list_del(&shrinker->list);
427         up_write(&shrinker_rwsem);
428         kfree(shrinker->nr_deferred);
429         shrinker->nr_deferred = NULL;
430 }
431 EXPORT_SYMBOL(unregister_shrinker);
432
433 #define SHRINK_BATCH 128
434
435 static unsigned long do_shrink_slab(struct shrink_control *shrinkctl,
436                                     struct shrinker *shrinker, int priority)
437 {
438         unsigned long freed = 0;
439         unsigned long long delta;
440         long total_scan;
441         long freeable;
442         long nr;
443         long new_nr;
444         int nid = shrinkctl->nid;
445         long batch_size = shrinker->batch ? shrinker->batch
446                                           : SHRINK_BATCH;
447         long scanned = 0, next_deferred;
448
449         if (!(shrinker->flags & SHRINKER_NUMA_AWARE))
450                 nid = 0;
451
452         freeable = shrinker->count_objects(shrinker, shrinkctl);
453         if (freeable == 0 || freeable == SHRINK_EMPTY)
454                 return freeable;
455
456         /*
457          * copy the current shrinker scan count into a local variable
458          * and zero it so that other concurrent shrinker invocations
459          * don't also do this scanning work.
460          */
461         nr = atomic_long_xchg(&shrinker->nr_deferred[nid], 0);
462
463         total_scan = nr;
464         if (shrinker->seeks) {
465                 delta = freeable >> priority;
466                 delta *= 4;
467                 do_div(delta, shrinker->seeks);
468         } else {
469                 /*
470                  * These objects don't require any IO to create. Trim
471                  * them aggressively under memory pressure to keep
472                  * them from causing refetches in the IO caches.
473                  */
474                 delta = freeable / 2;
475         }
476
477         total_scan += delta;
478         if (total_scan < 0) {
479                 pr_err("shrink_slab: %pS negative objects to delete nr=%ld\n",
480                        shrinker->scan_objects, total_scan);
481                 total_scan = freeable;
482                 next_deferred = nr;
483         } else
484                 next_deferred = total_scan;
485
486         /*
487          * We need to avoid excessive windup on filesystem shrinkers
488          * due to large numbers of GFP_NOFS allocations causing the
489          * shrinkers to return -1 all the time. This results in a large
490          * nr being built up so when a shrink that can do some work
491          * comes along it empties the entire cache due to nr >>>
492          * freeable. This is bad for sustaining a working set in
493          * memory.
494          *
495          * Hence only allow the shrinker to scan the entire cache when
496          * a large delta change is calculated directly.
497          */
498         if (delta < freeable / 4)
499                 total_scan = min(total_scan, freeable / 2);
500
501         /*
502          * Avoid the risk of looping forever due to a too large nr value:
503          * never try to free more than twice the estimated number of
504          * freeable entries.
505          */
506         if (total_scan > freeable * 2)
507                 total_scan = freeable * 2;
508
509         trace_mm_shrink_slab_start(shrinker, shrinkctl, nr,
510                                    freeable, delta, total_scan, priority);
511
512         /*
513          * Normally, we should not scan less than batch_size objects in one
514          * pass to avoid too frequent shrinker calls, but if the slab has less
515          * than batch_size objects in total and we are really tight on memory,
516          * we will try to reclaim all available objects, otherwise we can end
517          * up failing allocations although there are plenty of reclaimable
518          * objects spread over several slabs with usage less than the
519          * batch_size.
520          *
521          * We detect the "tight on memory" situations by looking at the total
522          * number of objects we want to scan (total_scan). If it is greater
523          * than the total number of objects on slab (freeable), we must be
524          * scanning at high prio and therefore should try to reclaim as much as
525          * possible.
526          */
527         while (total_scan >= batch_size ||
528                total_scan >= freeable) {
529                 unsigned long ret;
530                 unsigned long nr_to_scan = min(batch_size, total_scan);
531
532                 shrinkctl->nr_to_scan = nr_to_scan;
533                 shrinkctl->nr_scanned = nr_to_scan;
534                 ret = shrinker->scan_objects(shrinker, shrinkctl);
535                 if (ret == SHRINK_STOP)
536                         break;
537                 freed += ret;
538
539                 count_vm_events(SLABS_SCANNED, shrinkctl->nr_scanned);
540                 total_scan -= shrinkctl->nr_scanned;
541                 scanned += shrinkctl->nr_scanned;
542
543                 cond_resched();
544         }
545
546         if (next_deferred >= scanned)
547                 next_deferred -= scanned;
548         else
549                 next_deferred = 0;
550         /*
551          * move the unused scan count back into the shrinker in a
552          * manner that handles concurrent updates. If we exhausted the
553          * scan, there is no need to do an update.
554          */
555         if (next_deferred > 0)
556                 new_nr = atomic_long_add_return(next_deferred,
557                                                 &shrinker->nr_deferred[nid]);
558         else
559                 new_nr = atomic_long_read(&shrinker->nr_deferred[nid]);
560
561         trace_mm_shrink_slab_end(shrinker, nid, freed, nr, new_nr, total_scan);
562         return freed;
563 }
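
/*
 * Worked example of the delta calculation above, with illustrative numbers:
 * for freeable = 10000, priority = 12 (DEF_PRIORITY) and seeks = 2
 * (DEFAULT_SEEKS),
 *
 *	delta = (10000 >> 12) * 4 / 2 = 4
 *
 * so only a handful of objects are requested under light pressure, while at
 * priority 0 the same cache yields delta = 10000 * 4 / 2 = 20000, which sits
 * exactly at the "freeable * 2" cap enforced above.
 */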
564
565 #ifdef CONFIG_MEMCG
566 static unsigned long shrink_slab_memcg(gfp_t gfp_mask, int nid,
567                         struct mem_cgroup *memcg, int priority)
568 {
569         struct memcg_shrinker_map *map;
570         unsigned long ret, freed = 0;
571         int i;
572
573         if (!mem_cgroup_online(memcg))
574                 return 0;
575
576         if (!down_read_trylock(&shrinker_rwsem))
577                 return 0;
578
579         map = rcu_dereference_protected(memcg->nodeinfo[nid]->shrinker_map,
580                                         true);
581         if (unlikely(!map))
582                 goto unlock;
583
584         for_each_set_bit(i, map->map, shrinker_nr_max) {
585                 struct shrink_control sc = {
586                         .gfp_mask = gfp_mask,
587                         .nid = nid,
588                         .memcg = memcg,
589                 };
590                 struct shrinker *shrinker;
591
592                 shrinker = idr_find(&shrinker_idr, i);
593                 if (unlikely(!shrinker || shrinker == SHRINKER_REGISTERING)) {
594                         if (!shrinker)
595                                 clear_bit(i, map->map);
596                         continue;
597                 }
598
599                 /* Call non-slab shrinkers even though kmem is disabled */
600                 if (!memcg_kmem_enabled() &&
601                     !(shrinker->flags & SHRINKER_NONSLAB))
602                         continue;
603
604                 ret = do_shrink_slab(&sc, shrinker, priority);
605                 if (ret == SHRINK_EMPTY) {
606                         clear_bit(i, map->map);
607                         /*
608                          * After the shrinker reported that it had no objects to
609                          * free, but before we cleared the corresponding bit in
610                          * the memcg shrinker map, a new object might have been
611                          * added. To make sure we have the bit set in this
612                          * case, we invoke the shrinker one more time and reset
613                          * the bit if it reports that it is not empty anymore.
614                          * The memory barrier here pairs with the barrier in
615                          * memcg_set_shrinker_bit():
616                          *
617                          * list_lru_add()     shrink_slab_memcg()
618                          *   list_add_tail()    clear_bit()
619                          *   <MB>               <MB>
620                          *   set_bit()          do_shrink_slab()
621                          */
622                         smp_mb__after_atomic();
623                         ret = do_shrink_slab(&sc, shrinker, priority);
624                         if (ret == SHRINK_EMPTY)
625                                 ret = 0;
626                         else
627                                 memcg_set_shrinker_bit(memcg, nid, i);
628                 }
629                 freed += ret;
630
631                 if (rwsem_is_contended(&shrinker_rwsem)) {
632                         freed = freed ? : 1;
633                         break;
634                 }
635         }
636 unlock:
637         up_read(&shrinker_rwsem);
638         return freed;
639 }
640 #else /* CONFIG_MEMCG */
641 static unsigned long shrink_slab_memcg(gfp_t gfp_mask, int nid,
642                         struct mem_cgroup *memcg, int priority)
643 {
644         return 0;
645 }
646 #endif /* CONFIG_MEMCG */
647
648 /**
649  * shrink_slab - shrink slab caches
650  * @gfp_mask: allocation context
651  * @nid: node whose slab caches to target
652  * @memcg: memory cgroup whose slab caches to target
653  * @priority: the reclaim priority
654  *
655  * Call the shrink functions to age shrinkable caches.
656  *
657  * @nid is passed along to shrinkers with SHRINKER_NUMA_AWARE set,
658  * unaware shrinkers will receive a node id of 0 instead.
659  *
660  * @memcg specifies the memory cgroup to target. Unaware shrinkers
661  * are called only if it is the root cgroup.
662  *
663  * @priority is sc->priority, we take the number of objects and >> by priority
664  * in order to get the scan target.
665  *
666  * Returns the number of reclaimed slab objects.
667  */
668 static unsigned long shrink_slab(gfp_t gfp_mask, int nid,
669                                  struct mem_cgroup *memcg,
670                                  int priority)
671 {
672         unsigned long ret, freed = 0;
673         struct shrinker *shrinker;
674
675         /*
676          * The root memcg might be allocated even though memcg is disabled
677          * via "cgroup_disable=memory" boot parameter.  This could make
678          * mem_cgroup_is_root() return false, so only the memcg slab
679          * shrink would run while the global shrink is skipped.  This
680          * may result in premature OOM.
681          */
682         if (!mem_cgroup_disabled() && !mem_cgroup_is_root(memcg))
683                 return shrink_slab_memcg(gfp_mask, nid, memcg, priority);
684
685         if (!down_read_trylock(&shrinker_rwsem))
686                 goto out;
687
688         list_for_each_entry(shrinker, &shrinker_list, list) {
689                 struct shrink_control sc = {
690                         .gfp_mask = gfp_mask,
691                         .nid = nid,
692                         .memcg = memcg,
693                 };
694
695                 ret = do_shrink_slab(&sc, shrinker, priority);
696                 if (ret == SHRINK_EMPTY)
697                         ret = 0;
698                 freed += ret;
699                 /*
700                  * Bail out if someone wants to register a new shrinker, to
701                  * prevent the registration from being stalled for long periods
702                  * by parallel ongoing shrinking.
703                  */
704                 if (rwsem_is_contended(&shrinker_rwsem)) {
705                         freed = freed ? : 1;
706                         break;
707                 }
708         }
709
710         up_read(&shrinker_rwsem);
711 out:
712         cond_resched();
713         return freed;
714 }
715
716 void drop_slab_node(int nid)
717 {
718         unsigned long freed;
719
720         do {
721                 struct mem_cgroup *memcg = NULL;
722
723                 freed = 0;
724                 memcg = mem_cgroup_iter(NULL, NULL, NULL);
725                 do {
726                         freed += shrink_slab(GFP_KERNEL, nid, memcg, 0);
727                 } while ((memcg = mem_cgroup_iter(NULL, memcg, NULL)) != NULL);
728         } while (freed > 10);
729 }
730
731 void drop_slab(void)
732 {
733         int nid;
734
735         for_each_online_node(nid)
736                 drop_slab_node(nid);
737 }
738
739 static inline int is_page_cache_freeable(struct page *page)
740 {
741         /*
742          * A freeable page cache page is referenced only by the caller
743          * that isolated the page, the page cache and optional buffer
744          * heads at page->private.
745          */
746         int page_cache_pins = PageTransHuge(page) && PageSwapCache(page) ?
747                 HPAGE_PMD_NR : 1;
748         return page_count(page) - page_has_private(page) == 1 + page_cache_pins;
749 }
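
/*
 * Example of the check above for an ordinary (non-THP) file page isolated by
 * reclaim: the isolating caller and the page cache each hold one reference,
 * and attached buffer heads hold a third while also making page_has_private()
 * return 1, so in both cases the equation reduces to 2 == 2.  Any extra
 * reference (a racing get_user_pages(), another isolation, ...) makes the
 * page non-freeable here.
 */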
750
751 static int may_write_to_inode(struct inode *inode, struct scan_control *sc)
752 {
753         if (current->flags & PF_SWAPWRITE)
754                 return 1;
755         if (!inode_write_congested(inode))
756                 return 1;
757         if (inode_to_bdi(inode) == current->backing_dev_info)
758                 return 1;
759         return 0;
760 }
761
762 /*
763  * We detected a synchronous write error writing a page out.  Probably
764  * -ENOSPC.  We need to propagate that into the address_space for a subsequent
765  * fsync(), msync() or close().
766  *
767  * The tricky part is that after writepage we cannot touch the mapping: nothing
768  * prevents it from being freed up.  But we have a ref on the page and once
769  * that page is locked, the mapping is pinned.
770  *
771  * We're allowed to run sleeping lock_page() here because we know the caller has
772  * __GFP_FS.
773  */
774 static void handle_write_error(struct address_space *mapping,
775                                 struct page *page, int error)
776 {
777         lock_page(page);
778         if (page_mapping(page) == mapping)
779                 mapping_set_error(mapping, error);
780         unlock_page(page);
781 }
782
783 /* possible outcome of pageout() */
784 typedef enum {
785         /* failed to write page out, page is locked */
786         PAGE_KEEP,
787         /* move page to the active list, page is locked */
788         PAGE_ACTIVATE,
789         /* page has been sent to the disk successfully, page is unlocked */
790         PAGE_SUCCESS,
791         /* page is clean and locked */
792         PAGE_CLEAN,
793 } pageout_t;
794
795 /*
796  * pageout is called by shrink_page_list() for each dirty page.
797  * Calls ->writepage().
798  */
799 static pageout_t pageout(struct page *page, struct address_space *mapping,
800                          struct scan_control *sc)
801 {
802         /*
803          * If the page is dirty, only perform writeback if that write
804          * will be non-blocking, to prevent this allocation from being
805          * stalled by pagecache activity.  But note that there may be
806          * stalls if we need to run get_block().  We could test
807          * PagePrivate for that.
808          *
809          * If this process is currently in __generic_file_write_iter() against
810          * this page's queue, we can perform writeback even if that
811          * will block.
812          *
813          * If the page is swapcache, write it back even if that would
814          * block, for some throttling. This happens by accident, because
815          * swap_backing_dev_info is bust: it doesn't reflect the
816          * congestion state of the swapdevs.  Easy to fix, if needed.
817          */
818         if (!is_page_cache_freeable(page))
819                 return PAGE_KEEP;
820         if (!mapping) {
821                 /*
822                  * Some data journaling orphaned pages can have
823                  * page->mapping == NULL while being dirty with clean buffers.
824                  */
825                 if (page_has_private(page)) {
826                         if (try_to_free_buffers(page)) {
827                                 ClearPageDirty(page);
828                                 pr_info("%s: orphaned page\n", __func__);
829                                 return PAGE_CLEAN;
830                         }
831                 }
832                 return PAGE_KEEP;
833         }
834         if (mapping->a_ops->writepage == NULL)
835                 return PAGE_ACTIVATE;
836         if (!may_write_to_inode(mapping->host, sc))
837                 return PAGE_KEEP;
838
839         if (clear_page_dirty_for_io(page)) {
840                 int res;
841                 struct writeback_control wbc = {
842                         .sync_mode = WB_SYNC_NONE,
843                         .nr_to_write = SWAP_CLUSTER_MAX,
844                         .range_start = 0,
845                         .range_end = LLONG_MAX,
846                         .for_reclaim = 1,
847                 };
848
849                 SetPageReclaim(page);
850                 res = mapping->a_ops->writepage(page, &wbc);
851                 if (res < 0)
852                         handle_write_error(mapping, page, res);
853                 if (res == AOP_WRITEPAGE_ACTIVATE) {
854                         ClearPageReclaim(page);
855                         return PAGE_ACTIVATE;
856                 }
857
858                 if (!PageWriteback(page)) {
859                         /* synchronous write or broken a_ops? */
860                         ClearPageReclaim(page);
861                 }
862                 trace_mm_vmscan_writepage(page);
863                 inc_node_page_state(page, NR_VMSCAN_WRITE);
864                 return PAGE_SUCCESS;
865         }
866
867         return PAGE_CLEAN;
868 }
869
870 /*
871  * Same as remove_mapping, but if the page is removed from the mapping, it
872  * gets returned with a refcount of 0.
873  */
874 static int __remove_mapping(struct address_space *mapping, struct page *page,
875                             bool reclaimed, struct mem_cgroup *target_memcg)
876 {
877         unsigned long flags;
878         int refcount;
879
880         BUG_ON(!PageLocked(page));
881         BUG_ON(mapping != page_mapping(page));
882
883         xa_lock_irqsave(&mapping->i_pages, flags);
884         /*
885          * The non racy check for a busy page.
886          *
887          * Must be careful with the order of the tests. When someone has
888          * a ref to the page, it may be possible that they dirty it then
889          * drop the reference. So if PageDirty is tested before page_count
890          * here, then the following race may occur:
891          *
892          * get_user_pages(&page);
893          * [user mapping goes away]
894          * write_to(page);
895          *                              !PageDirty(page)    [good]
896          * SetPageDirty(page);
897          * put_page(page);
898          *                              !page_count(page)   [good, discard it]
899          *
900          * [oops, our write_to data is lost]
901          *
902          * Reversing the order of the tests ensures such a situation cannot
903          * escape unnoticed. The smp_rmb is needed to ensure the page->flags
904          * load is not satisfied before that of page->_refcount.
905          *
906          * Note that if SetPageDirty is always performed via set_page_dirty,
907          * and thus under the i_pages lock, then this ordering is not required.
908          */
909         refcount = 1 + compound_nr(page);
910         if (!page_ref_freeze(page, refcount))
911                 goto cannot_free;
912         /* note: atomic_cmpxchg in page_ref_freeze provides the smp_rmb */
913         if (unlikely(PageDirty(page))) {
914                 page_ref_unfreeze(page, refcount);
915                 goto cannot_free;
916         }
917
918         if (PageSwapCache(page)) {
919                 swp_entry_t swap = { .val = page_private(page) };
920                 mem_cgroup_swapout(page, swap);
921                 __delete_from_swap_cache(page, swap);
922                 xa_unlock_irqrestore(&mapping->i_pages, flags);
923                 put_swap_page(page, swap);
924         } else {
925                 void (*freepage)(struct page *);
926                 void *shadow = NULL;
927
928                 freepage = mapping->a_ops->freepage;
929                 /*
930                  * Remember a shadow entry for reclaimed file cache in
931                  * order to detect refaults, thus thrashing, later on.
932                  *
933                  * But don't store shadows in an address space that is
934          * already exiting.  This is not just an optimization,
935                  * inode reclaim needs to empty out the radix tree or
936                  * the nodes are lost.  Don't plant shadows behind its
937                  * back.
938                  *
939                  * We also don't store shadows for DAX mappings because the
940                  * only page cache pages found in these are zero pages
941                  * covering holes, and because we don't want to mix DAX
942                  * exceptional entries and shadow exceptional entries in the
943                  * same address_space.
944                  */
945                 if (reclaimed && page_is_file_cache(page) &&
946                     !mapping_exiting(mapping) && !dax_mapping(mapping))
947                         shadow = workingset_eviction(page, target_memcg);
948                 __delete_from_page_cache(page, shadow);
949                 xa_unlock_irqrestore(&mapping->i_pages, flags);
950
951                 if (freepage != NULL)
952                         freepage(page);
953         }
954
955         return 1;
956
957 cannot_free:
958         xa_unlock_irqrestore(&mapping->i_pages, flags);
959         return 0;
960 }
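
/*
 * A note on the "1 + compound_nr(page)" freeze target above: the 1 is the
 * isolating caller's reference, and the page cache (or swap cache) holds one
 * reference per base page.  A plain 4KB page must therefore be frozen at 2,
 * while a PMD-sized THP in the swap cache must be frozen at 513 (assuming
 * HPAGE_PMD_NR == 512); any other count means someone else still holds a
 * reference and the page cannot be taken out of its mapping here.
 */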
961
962 /*
963  * Attempt to detach a locked page from its ->mapping.  If it is dirty or if
964  * someone else has a ref on the page, abort and return 0.  If it was
965  * successfully detached, return 1.  Assumes the caller has a single ref on
966  * this page.
967  */
968 int remove_mapping(struct address_space *mapping, struct page *page)
969 {
970         if (__remove_mapping(mapping, page, false, NULL)) {
971                 /*
972                  * Unfreezing the refcount with 1 rather than 2 effectively
973                  * drops the pagecache ref for us without requiring another
974                  * atomic operation.
975                  */
976                 page_ref_unfreeze(page, 1);
977                 return 1;
978         }
979         return 0;
980 }
981
982 /**
983  * putback_lru_page - put previously isolated page onto appropriate LRU list
984  * @page: page to be put back to appropriate lru list
985  *
986  * Add previously isolated @page to appropriate LRU list.
987  * Page may still be unevictable for other reasons.
988  *
989  * lru_lock must not be held, interrupts must be enabled.
990  */
991 void putback_lru_page(struct page *page)
992 {
993         lru_cache_add(page);
994         put_page(page);         /* drop ref from isolate */
995 }
996
997 enum page_references {
998         PAGEREF_RECLAIM,
999         PAGEREF_RECLAIM_CLEAN,
1000         PAGEREF_KEEP,
1001         PAGEREF_ACTIVATE,
1002 };
1003
1004 static enum page_references page_check_references(struct page *page,
1005                                                   struct scan_control *sc)
1006 {
1007         int referenced_ptes, referenced_page;
1008         unsigned long vm_flags;
1009
1010         referenced_ptes = page_referenced(page, 1, sc->target_mem_cgroup,
1011                                           &vm_flags);
1012         referenced_page = TestClearPageReferenced(page);
1013
1014         /*
1015          * Mlock lost the isolation race with us.  Let try_to_unmap()
1016          * move the page to the unevictable list.
1017          */
1018         if (vm_flags & VM_LOCKED)
1019                 return PAGEREF_RECLAIM;
1020
1021         if (referenced_ptes) {
1022                 if (PageSwapBacked(page))
1023                         return PAGEREF_ACTIVATE;
1024                 /*
1025                  * All mapped pages start out with page table
1026                  * references from the instantiating fault, so we need
1027                  * to look twice if a mapped file page is used more
1028                  * than once.
1029                  *
1030                  * Mark it and spare it for another trip around the
1031                  * inactive list.  Another page table reference will
1032                  * lead to its activation.
1033                  *
1034                  * Note: the mark is set for activated pages as well
1035                  * so that recently deactivated but used pages are
1036                  * quickly recovered.
1037                  */
1038                 SetPageReferenced(page);
1039
1040                 if (referenced_page || referenced_ptes > 1)
1041                         return PAGEREF_ACTIVATE;
1042
1043                 /*
1044                  * Activate file-backed executable pages after first usage.
1045                  */
1046                 if (vm_flags & VM_EXEC)
1047                         return PAGEREF_ACTIVATE;
1048
1049                 return PAGEREF_KEEP;
1050         }
1051
1052         /* Reclaim if clean, defer dirty pages to writeback */
1053         if (referenced_page && !PageSwapBacked(page))
1054                 return PAGEREF_RECLAIM_CLEAN;
1055
1056         return PAGEREF_RECLAIM;
1057 }
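
/*
 * Summary of the decisions above: a page in a VM_LOCKED vma is returned as
 * PAGEREF_RECLAIM so try_to_unmap() can move it to the unevictable list; a
 * pte-referenced page is activated if it is swap backed, executable file
 * cache, or seen used more than once (PG_referenced already set or multiple
 * referencing ptes), and is otherwise kept on the inactive list for another
 * trip; a page with no referencing ptes is reclaimed, with
 * PAGEREF_RECLAIM_CLEAN chosen for file pages whose PG_referenced bit was
 * the only sign of use.
 */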
1058
1059 /* Check if a page is dirty or under writeback */
1060 static void page_check_dirty_writeback(struct page *page,
1061                                        bool *dirty, bool *writeback)
1062 {
1063         struct address_space *mapping;
1064
1065         /*
1066          * Anonymous pages are not handled by flushers and must be written
1067          * from reclaim context. Do not stall reclaim based on them.
1068          */
1069         if (!page_is_file_cache(page) ||
1070             (PageAnon(page) && !PageSwapBacked(page))) {
1071                 *dirty = false;
1072                 *writeback = false;
1073                 return;
1074         }
1075
1076         /* By default assume that the page flags are accurate */
1077         *dirty = PageDirty(page);
1078         *writeback = PageWriteback(page);
1079
1080         /* Verify dirty/writeback state if the filesystem supports it */
1081         if (!page_has_private(page))
1082                 return;
1083
1084         mapping = page_mapping(page);
1085         if (mapping && mapping->a_ops->is_dirty_writeback)
1086                 mapping->a_ops->is_dirty_writeback(page, dirty, writeback);
1087 }
1088
1089 /*
1090  * shrink_page_list() returns the number of reclaimed pages
1091  */
1092 static unsigned long shrink_page_list(struct list_head *page_list,
1093                                       struct pglist_data *pgdat,
1094                                       struct scan_control *sc,
1095                                       struct reclaim_stat *stat,
1096                                       bool ignore_references)
1097 {
1098         LIST_HEAD(ret_pages);
1099         LIST_HEAD(free_pages);
1100         unsigned nr_reclaimed = 0;
1101         unsigned pgactivate = 0;
1102
1103         memset(stat, 0, sizeof(*stat));
1104         cond_resched();
1105
1106         while (!list_empty(page_list)) {
1107                 struct address_space *mapping;
1108                 struct page *page;
1109                 int may_enter_fs;
1110                 enum page_references references = PAGEREF_RECLAIM;
1111                 bool dirty, writeback;
1112                 unsigned int nr_pages;
1113
1114                 cond_resched();
1115
1116                 page = lru_to_page(page_list);
1117                 list_del(&page->lru);
1118
1119                 if (!trylock_page(page))
1120                         goto keep;
1121
1122                 VM_BUG_ON_PAGE(PageActive(page), page);
1123
1124                 nr_pages = compound_nr(page);
1125
1126                 /* Account the number of base pages even though THP */
1127                 sc->nr_scanned += nr_pages;
1128
1129                 if (unlikely(!page_evictable(page)))
1130                         goto activate_locked;
1131
1132                 if (!sc->may_unmap && page_mapped(page))
1133                         goto keep_locked;
1134
1135                 /* page_update_gen() tried to promote this page? */
1136                 if (lru_gen_enabled() && !ignore_references &&
1137                     page_mapped(page) && PageReferenced(page))
1138                         goto keep_locked;
1139
1140                 may_enter_fs = (sc->gfp_mask & __GFP_FS) ||
1141                         (PageSwapCache(page) && (sc->gfp_mask & __GFP_IO));
1142
1143                 /*
1144                  * The number of dirty pages determines if a node is marked
1145                  * reclaim_congested which affects wait_iff_congested. kswapd
1146                  * will stall and start writing pages if the tail of the LRU
1147                  * is all dirty unqueued pages.
1148                  */
1149                 page_check_dirty_writeback(page, &dirty, &writeback);
1150                 if (dirty || writeback)
1151                         stat->nr_dirty++;
1152
1153                 if (dirty && !writeback)
1154                         stat->nr_unqueued_dirty++;
1155
1156                 /*
1157                  * Treat this page as congested if the underlying BDI is or if
1158                  * pages are cycling through the LRU so quickly that the
1159                  * pages marked for immediate reclaim are making it to the
1160                  * end of the LRU a second time.
1161                  */
1162                 mapping = page_mapping(page);
1163                 if (((dirty || writeback) && mapping &&
1164                      inode_write_congested(mapping->host)) ||
1165                     (writeback && PageReclaim(page)))
1166                         stat->nr_congested++;
1167
1168                 /*
1169                  * If a page at the tail of the LRU is under writeback, there
1170                  * are three cases to consider.
1171                  *
1172                  * 1) If reclaim is encountering an excessive number of pages
1173                  *    under writeback and this page is both under writeback and
1174                  *    PageReclaim then it indicates that pages are being queued
1175                  *    for IO but are being recycled through the LRU before the
1176                  *    IO can complete. Waiting on the page itself risks an
1177                  *    indefinite stall if it is impossible to writeback the
1178                  *    page due to IO error or disconnected storage so instead
1179                  *    note that the LRU is being scanned too quickly and the
1180                  *    caller can stall after page list has been processed.
1181                  *
1182                  * 2) Global or new memcg reclaim encounters a page that is
1183                  *    not marked for immediate reclaim, or the caller does not
1184                  *    have __GFP_FS (or __GFP_IO if it's simply going to swap,
1185                  *    not to fs). In this case mark the page for immediate
1186                  *    reclaim and continue scanning.
1187                  *
1188                  *    Require may_enter_fs because we would wait on fs, which
1189                  *    may not have submitted IO yet. And the loop driver might
1190                  *    enter reclaim, and deadlock if it waits on a page for
1191                  *    which it is needed to do the write (loop masks off
1192                  *    __GFP_IO|__GFP_FS for this reason); but more thought
1193                  *    would probably show more reasons.
1194                  *
1195                  * 3) Legacy memcg encounters a page that is already marked
1196                  *    PageReclaim. memcg does not have any dirty pages
1197                  *    throttling so we could easily OOM just because too many
1198                  *    pages are in writeback and there is nothing else to
1199                  *    reclaim. Wait for the writeback to complete.
1200                  *
1201                  * In cases 1) and 2) we activate the pages to get them out of
1202                  * the way while we continue scanning for clean pages on the
1203                  * inactive list and refilling from the active list. The
1204                  * observation here is that waiting for disk writes is more
1205                  * expensive than potentially causing reloads down the line.
1206                  * Since they're marked for immediate reclaim, they won't put
1207                  * memory pressure on the cache working set any longer than it
1208                  * takes to write them to disk.
1209                  */
1210                 if (PageWriteback(page)) {
1211                         /* Case 1 above */
1212                         if (current_is_kswapd() &&
1213                             PageReclaim(page) &&
1214                             test_bit(PGDAT_WRITEBACK, &pgdat->flags)) {
1215                                 stat->nr_immediate++;
1216                                 goto activate_locked;
1217
1218                         /* Case 2 above */
1219                         } else if (writeback_throttling_sane(sc) ||
1220                             !PageReclaim(page) || !may_enter_fs) {
1221                                 /*
1222                                  * This is slightly racy - end_page_writeback()
1223                                  * might have just cleared PageReclaim, then
1224                                  * setting PageReclaim here ends up interpreted
1225                                  * as PageReadahead - but that does not matter
1226                                  * enough to care.  What we do want is for this
1227                                  * page to have PageReclaim set next time memcg
1228                                  * reclaim reaches the tests above, so it will
1229                                  * then wait_on_page_writeback() to avoid OOM;
1230                                  * and it's also appropriate in global reclaim.
1231                                  */
1232                                 SetPageReclaim(page);
1233                                 stat->nr_writeback++;
1234                                 goto activate_locked;
1235
1236                         /* Case 3 above */
1237                         } else {
1238                                 unlock_page(page);
1239                                 wait_on_page_writeback(page);
1240                                 /* then go back and try same page again */
1241                                 list_add_tail(&page->lru, page_list);
1242                                 continue;
1243                         }
1244                 }
1245
1246                 if (!ignore_references)
1247                         references = page_check_references(page, sc);
1248
1249                 switch (references) {
1250                 case PAGEREF_ACTIVATE:
1251                         goto activate_locked;
1252                 case PAGEREF_KEEP:
1253                         stat->nr_ref_keep += nr_pages;
1254                         goto keep_locked;
1255                 case PAGEREF_RECLAIM:
1256                 case PAGEREF_RECLAIM_CLEAN:
1257                         ; /* try to reclaim the page below */
1258                 }
1259
1260                 /*
1261                  * Anonymous process memory has backing store?
1262                  * Try to allocate it some swap space here.
1263                  * Lazyfree page could be freed directly
1264                  */
1265                 if (PageAnon(page) && PageSwapBacked(page)) {
1266                         if (!PageSwapCache(page)) {
1267                                 if (!(sc->gfp_mask & __GFP_IO))
1268                                         goto keep_locked;
1269                                 if (PageTransHuge(page)) {
1270                                         /* cannot split THP, skip it */
1271                                         if (!can_split_huge_page(page, NULL))
1272                                                 goto activate_locked;
1273                                         /*
1274                                          * Split pages without a PMD map right
1275                                          * away. Chances are some or all of the
1276                                          * tail pages can be freed without IO.
1277                                          */
1278                                         if (!compound_mapcount(page) &&
1279                                             split_huge_page_to_list(page,
1280                                                                     page_list))
1281                                                 goto activate_locked;
1282                                 }
1283                                 if (!add_to_swap(page)) {
1284                                         if (!PageTransHuge(page))
1285                                                 goto activate_locked_split;
1286                                         /* Fallback to swap normal pages */
1287                                         if (split_huge_page_to_list(page,
1288                                                                     page_list))
1289                                                 goto activate_locked;
1290 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
1291                                         count_vm_event(THP_SWPOUT_FALLBACK);
1292 #endif
1293                                         if (!add_to_swap(page))
1294                                                 goto activate_locked_split;
1295                                 }
1296
1297                                 may_enter_fs = 1;
1298
1299                                 /* Adding to swap updated mapping */
1300                                 mapping = page_mapping(page);
1301                         }
1302                 } else if (unlikely(PageTransHuge(page))) {
1303                         /* Split file THP */
1304                         if (split_huge_page_to_list(page, page_list))
1305                                 goto keep_locked;
1306                 }
1307
1308                 /*
1309                  * THP may get split above; if so, subtract the tail pages and
1310                  * update nr_pages to avoid accounting tail pages twice.
1311                  *
1312                  * The tail pages that are added into swap cache successfully
1313                  * reach here.
1314                  */
1315                 if ((nr_pages > 1) && !PageTransHuge(page)) {
1316                         sc->nr_scanned -= (nr_pages - 1);
1317                         nr_pages = 1;
1318                 }
1319
1320                 /*
1321                  * The page is mapped into the page tables of one or more
1322                  * processes. Try to unmap it here.
1323                  */
1324                 if (page_mapped(page)) {
1325                         enum ttu_flags flags = TTU_BATCH_FLUSH;
1326
1327                         if (unlikely(PageTransHuge(page)))
1328                                 flags |= TTU_SPLIT_HUGE_PMD;
1329                         if (!try_to_unmap(page, flags)) {
1330                                 stat->nr_unmap_fail += nr_pages;
1331                                 goto activate_locked;
1332                         }
1333                 }
1334
1335                 if (PageDirty(page)) {
1336                         /*
1337                          * Only kswapd can writeback filesystem pages
1338                          * to avoid risk of stack overflow. But avoid
1339                          * injecting inefficient single-page IO into
1340                          * flusher writeback as much as possible: only
1341                          * write pages when we've encountered many
1342                          * dirty pages, and when we've already scanned
1343                          * the rest of the LRU for clean pages and see
1344                          * the same dirty pages again (PageReclaim).
1345                          */
1346                         if (page_is_file_cache(page) &&
1347                             (!current_is_kswapd() || !PageReclaim(page) ||
1348                              !test_bit(PGDAT_DIRTY, &pgdat->flags))) {
1349                                 /*
1350                                  * Immediately reclaim when written back.
1351                          * Similar in principle to deactivate_page()
1352                                  * except we already have the page isolated
1353                                  * and know it's dirty
1354                                  */
1355                                 inc_node_page_state(page, NR_VMSCAN_IMMEDIATE);
1356                                 SetPageReclaim(page);
1357
1358                                 goto activate_locked;
1359                         }
1360
1361                         if (references == PAGEREF_RECLAIM_CLEAN)
1362                                 goto keep_locked;
1363                         if (!may_enter_fs)
1364                                 goto keep_locked;
1365                         if (!sc->may_writepage)
1366                                 goto keep_locked;
1367
1368                         /*
1369                          * Page is dirty. Flush the TLB if a writable entry
1370                          * potentially exists to avoid CPU writes after IO
1371                          * starts and then write it out here.
1372                          */
1373                         try_to_unmap_flush_dirty();
1374                         switch (pageout(page, mapping, sc)) {
1375                         case PAGE_KEEP:
1376                                 goto keep_locked;
1377                         case PAGE_ACTIVATE:
1378                                 goto activate_locked;
1379                         case PAGE_SUCCESS:
1380                                 if (PageWriteback(page))
1381                                         goto keep;
1382                                 if (PageDirty(page))
1383                                         goto keep;
1384
1385                                 /*
1386                                  * A synchronous write - probably a ramdisk.  Go
1387                                  * ahead and try to reclaim the page.
1388                                  */
1389                                 if (!trylock_page(page))
1390                                         goto keep;
1391                                 if (PageDirty(page) || PageWriteback(page))
1392                                         goto keep_locked;
1393                                 mapping = page_mapping(page);
1394                         case PAGE_CLEAN:
1395                                 ; /* try to free the page below */
1396                         }
1397                 }
1398
1399                 /*
1400                  * If the page has buffers, try to free the buffer mappings
1401                  * associated with this page. If we succeed we try to free
1402                  * the page as well.
1403                  *
1404                  * We do this even if the page is PageDirty().
1405                  * try_to_release_page() does not perform I/O, but it is
1406                  * possible for a page to have PageDirty set, but it is actually
1407                  * clean (all its buffers are clean).  This happens if the
1408                  * buffers were written out directly, with submit_bh(). ext3
1409                  * will do this, as well as the blockdev mapping.
1410                  * try_to_release_page() will discover that cleanness and will
1411                  * drop the buffers and mark the page clean - it can be freed.
1412                  *
1413                  * Rarely, pages can have buffers and no ->mapping.  These are
1414                  * the pages which were not successfully invalidated in
1415                  * truncate_complete_page().  We try to drop those buffers here
1416                  * and if that worked, and the page is no longer mapped into
1417                  * process address space (page_count == 1) it can be freed.
1418                  * Otherwise, leave the page on the LRU so it is swappable.
1419                  */
1420                 if (page_has_private(page)) {
1421                         if (!try_to_release_page(page, sc->gfp_mask))
1422                                 goto activate_locked;
1423                         if (!mapping && page_count(page) == 1) {
1424                                 unlock_page(page);
1425                                 if (put_page_testzero(page))
1426                                         goto free_it;
1427                                 else {
1428                                         /*
1429                                          * rare race with speculative reference.
1430                                          * the speculative reference will free
1431                                          * this page shortly, so we may
1432                                          * increment nr_reclaimed here (and
1433                                          * leave it off the LRU).
1434                                          */
1435                                         nr_reclaimed++;
1436                                         continue;
1437                                 }
1438                         }
1439                 }
1440
1441                 if (PageAnon(page) && !PageSwapBacked(page)) {
1442                         /* follow __remove_mapping for reference */
1443                         if (!page_ref_freeze(page, 1))
1444                                 goto keep_locked;
1445                         if (PageDirty(page)) {
1446                                 page_ref_unfreeze(page, 1);
1447                                 goto keep_locked;
1448                         }
1449
1450                         count_vm_event(PGLAZYFREED);
1451                         count_memcg_page_event(page, PGLAZYFREED);
1452                 } else if (!mapping || !__remove_mapping(mapping, page, true,
1453                                                          sc->target_mem_cgroup))
1454                         goto keep_locked;
1455
1456                 unlock_page(page);
1457 free_it:
1458                 /*
1459                  * THP may get swapped out as a whole, so account
1460                  * all of its base pages.
1461                  */
1462                 nr_reclaimed += nr_pages;
1463
1464                 /*
1465                  * Is there a need to periodically free the page list? It would
1466                  * appear not, as the counts should be low.
1467                  */
1468                 if (unlikely(PageTransHuge(page)))
1469                         (*get_compound_page_dtor(page))(page);
1470                 else
1471                         list_add(&page->lru, &free_pages);
1472                 continue;
1473
1474 activate_locked_split:
1475                 /*
1476                  * The tail pages that failed to be added to the swap cache
1477                  * reach here.  Fix up nr_scanned and nr_pages.
1478                  */
1479                 if (nr_pages > 1) {
1480                         sc->nr_scanned -= (nr_pages - 1);
1481                         nr_pages = 1;
1482                 }
1483 activate_locked:
1484                 /* Not a candidate for swapping, so reclaim swap space. */
1485                 if (PageSwapCache(page) && (mem_cgroup_swap_full(page) ||
1486                                                 PageMlocked(page)))
1487                         try_to_free_swap(page);
1488                 VM_BUG_ON_PAGE(PageActive(page), page);
1489                 if (!PageMlocked(page)) {
1490                         int type = page_is_file_cache(page);
1491                         SetPageActive(page);
1492                         stat->nr_activate[type] += nr_pages;
1493                         count_memcg_page_event(page, PGACTIVATE);
1494                 }
1495 keep_locked:
1496                 unlock_page(page);
1497 keep:
1498                 list_add(&page->lru, &ret_pages);
1499                 VM_BUG_ON_PAGE(PageLRU(page) || PageUnevictable(page), page);
1500         }
1501
1502         pgactivate = stat->nr_activate[0] + stat->nr_activate[1];
1503
1504         mem_cgroup_uncharge_list(&free_pages);
1505         try_to_unmap_flush();
1506         free_unref_page_list(&free_pages);
1507
1508         list_splice(&ret_pages, page_list);
1509         count_vm_events(PGACTIVATE, pgactivate);
1510
1511         return nr_reclaimed;
1512 }
1513
1514 unsigned long reclaim_clean_pages_from_list(struct zone *zone,
1515                                             struct list_head *page_list)
1516 {
1517         struct scan_control sc = {
1518                 .gfp_mask = GFP_KERNEL,
1519                 .priority = DEF_PRIORITY,
1520                 .may_unmap = 1,
1521         };
1522         struct reclaim_stat dummy_stat;
1523         unsigned long ret;
1524         struct page *page, *next;
1525         LIST_HEAD(clean_pages);
1526
1527         list_for_each_entry_safe(page, next, page_list, lru) {
1528                 if (page_is_file_cache(page) && !PageDirty(page) &&
1529                     !__PageMovable(page) && !PageUnevictable(page)) {
1530                         ClearPageActive(page);
1531                         list_move(&page->lru, &clean_pages);
1532                 }
1533         }
1534
1535         ret = shrink_page_list(&clean_pages, zone->zone_pgdat, &sc,
1536                                 &dummy_stat, true);
1537         list_splice(&clean_pages, page_list);
1538         mod_node_page_state(zone->zone_pgdat, NR_ISOLATED_FILE, -ret);
1539         return ret;
1540 }
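/*
 * Editorial sketch (not part of the original source): one plausible way a
 * contiguous-range allocator could use reclaim_clean_pages_from_list().  The
 * surrounding caller and how pages land on the list are assumptions; the
 * contract shown is only what the function above implements -- clean,
 * unmapped file pages are reclaimed immediately and everything else is
 * spliced back onto the caller's list.
 *
 *	LIST_HEAD(isolated);
 *
 *	// ... isolate the target pages onto &isolated first ...
 *	reclaim_clean_pages_from_list(zone, &isolated);
 *	// &isolated now holds only the pages that still need migration
 */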
1541
1542 /*
1543  * Attempt to remove the specified page from its LRU.  Only take this page
1544  * if it is of the appropriate PageActive status.  Pages which are being
1545  * freed elsewhere are also ignored.
1546  *
1547  * page:        page to consider
1548  * mode:        one of the LRU isolation modes defined above
1549  *
1550  * returns 0 on success, -ve errno on failure.
1551  */
1552 int __isolate_lru_page(struct page *page, isolate_mode_t mode)
1553 {
1554         int ret = -EBUSY;
1555
1556         /* Only take pages on the LRU. */
1557         if (!PageLRU(page))
1558                 return ret;
1559
1560         /* Compaction should not handle unevictable pages but CMA can do so */
1561         if (PageUnevictable(page) && !(mode & ISOLATE_UNEVICTABLE))
1562                 return ret;
1563
1564         /*
1565          * To minimise LRU disruption, the caller can indicate that it only
1566          * wants to isolate pages it will be able to operate on without
1567          * blocking - clean pages for the most part.
1568          *
1569          * ISOLATE_ASYNC_MIGRATE indicates that the caller only wants pages
1570          * that can be migrated without blocking.
1571          */
1572         if (mode & ISOLATE_ASYNC_MIGRATE) {
1573                 /* All the caller can do on PageWriteback is block */
1574                 if (PageWriteback(page))
1575                         return ret;
1576
1577                 if (PageDirty(page)) {
1578                         struct address_space *mapping;
1579                         bool migrate_dirty;
1580
1581                         /*
1582                          * Only pages without mappings or that have a
1583                          * ->migratepage callback are possible to migrate
1584                          * without blocking. However, we can be racing with
1585                          * truncation so it's necessary to lock the page
1586                          * to stabilise the mapping as truncation holds
1587                          * the page lock until after the page is removed
1588                          * from the page cache.
1589                          */
1590                         if (!trylock_page(page))
1591                                 return ret;
1592
1593                         mapping = page_mapping(page);
1594                         migrate_dirty = !mapping || mapping->a_ops->migratepage;
1595                         unlock_page(page);
1596                         if (!migrate_dirty)
1597                                 return ret;
1598                 }
1599         }
1600
1601         if ((mode & ISOLATE_UNMAPPED) && page_mapped(page))
1602                 return ret;
1603
1604         if (likely(get_page_unless_zero(page))) {
1605                 /*
1606                  * Be careful not to clear PageLRU until after we're
1607                  * sure the page is not being freed elsewhere -- the
1608                  * page release code relies on it.
1609                  */
1610                 if (TestClearPageLRU(page))
1611                         ret = 0;
1612                 else
1613                         put_page(page);
1614         }
1615
1616         return ret;
1617 }
1618
1619
1620 /*
1621  * Update LRU sizes after isolating pages. The LRU size updates must
1622  * be complete before mem_cgroup_update_lru_size due to a sanity check.
1623  */
1624 static __always_inline void update_lru_sizes(struct lruvec *lruvec,
1625                         enum lru_list lru, unsigned long *nr_zone_taken)
1626 {
1627         int zid;
1628
1629         for (zid = 0; zid < MAX_NR_ZONES; zid++) {
1630                 if (!nr_zone_taken[zid])
1631                         continue;
1632
1633                 __update_lru_size(lruvec, lru, zid, -nr_zone_taken[zid]);
1634 #ifdef CONFIG_MEMCG
1635                 mem_cgroup_update_lru_size(lruvec, lru, zid, -nr_zone_taken[zid]);
1636 #endif
1637         }
1638
1639 }
1640
1641 /**
1642  * pgdat->lru_lock is heavily contended.  Some of the functions that
1643  * shrink the lists perform better by taking out a batch of pages
1644  * and working on them outside the LRU lock.
1645  *
1646  * For pagecache intensive workloads, this function is the hottest
1647  * spot in the kernel (apart from copy_*_user functions).
1648  *
1649  * Appropriate locks must be held before calling this function.
1650  *
1651  * @nr_to_scan: The number of eligible pages to look through on the list.
1652  * @lruvec:     The LRU vector to pull pages from.
1653  * @dst:        The temp list to put pages on to.
1654  * @nr_scanned: The number of pages that were scanned.
1655  * @sc:         The scan_control struct for this reclaim session
1656  * @mode:       One of the LRU isolation modes
1657  * @lru:        LRU list id for isolating
1658  *
1659  * returns how many pages were moved onto *@dst.
1660  */
1661 static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
1662                 struct lruvec *lruvec, struct list_head *dst,
1663                 unsigned long *nr_scanned, struct scan_control *sc,
1664                 enum lru_list lru)
1665 {
1666         struct list_head *src = &lruvec->lists[lru];
1667         unsigned long nr_taken = 0;
1668         unsigned long nr_zone_taken[MAX_NR_ZONES] = { 0 };
1669         unsigned long nr_skipped[MAX_NR_ZONES] = { 0, };
1670         unsigned long skipped = 0;
1671         unsigned long scan, total_scan, nr_pages;
1672         LIST_HEAD(pages_skipped);
1673         isolate_mode_t mode = (sc->may_unmap ? 0 : ISOLATE_UNMAPPED);
1674
1675         total_scan = 0;
1676         scan = 0;
1677         while (scan < nr_to_scan && !list_empty(src)) {
1678                 struct page *page;
1679
1680                 page = lru_to_page(src);
1681                 prefetchw_prev_lru_page(page, src, flags);
1682
1683                 nr_pages = compound_nr(page);
1684                 total_scan += nr_pages;
1685
1686                 if (page_zonenum(page) > sc->reclaim_idx) {
1687                         list_move(&page->lru, &pages_skipped);
1688                         nr_skipped[page_zonenum(page)] += nr_pages;
1689                         continue;
1690                 }
1691
1692                 /*
1693                  * Do not count skipped pages because that makes the function
1694                  * return with no isolated pages if the LRU mostly contains
1695                  * ineligible pages.  This causes the VM to not reclaim any
1696                  * pages, triggering a premature OOM.
1697                  *
1698                  * Account all tail pages of THP.  This would not cause
1699                  * premature OOM since __isolate_lru_page() returns -EBUSY
1700                  * only when the page is being freed somewhere else.
1701                  */
1702                 scan += nr_pages;
1703                 switch (__isolate_lru_page(page, mode)) {
1704                 case 0:
1705                         nr_taken += nr_pages;
1706                         nr_zone_taken[page_zonenum(page)] += nr_pages;
1707                         list_move(&page->lru, dst);
1708                         break;
1709
1710                 case -EBUSY:
1711                         /* else it is being freed elsewhere */
1712                         list_move(&page->lru, src);
1713                         continue;
1714
1715                 default:
1716                         BUG();
1717                 }
1718         }
1719
1720         /*
1721          * Splice any skipped pages to the start of the LRU list. Note that
1722          * this disrupts the LRU order when reclaiming for lower zones but
1723          * we cannot splice to the tail. If we did then the SWAP_CLUSTER_MAX
1724          * scanning would soon rescan the same pages to skip and put the
1725          * system at risk of premature OOM.
1726          */
1727         if (!list_empty(&pages_skipped)) {
1728                 int zid;
1729
1730                 list_splice(&pages_skipped, src);
1731                 for (zid = 0; zid < MAX_NR_ZONES; zid++) {
1732                         if (!nr_skipped[zid])
1733                                 continue;
1734
1735                         __count_zid_vm_events(PGSCAN_SKIP, zid, nr_skipped[zid]);
1736                         skipped += nr_skipped[zid];
1737                 }
1738         }
1739         *nr_scanned = total_scan;
1740         trace_mm_vmscan_lru_isolate(sc->reclaim_idx, sc->order, nr_to_scan,
1741                                     total_scan, skipped, nr_taken, mode, lru);
1742         update_lru_sizes(lruvec, lru, nr_zone_taken);
1743         return nr_taken;
1744 }
1745
1746 /**
1747  * isolate_lru_page - tries to isolate a page from its LRU list
1748  * @page: page to isolate from its LRU list
1749  *
1750  * Isolates a @page from an LRU list, clears PageLRU and adjusts the
1751  * vmstat statistic corresponding to whatever LRU list the page was on.
1752  *
1753  * Returns 0 if the page was removed from an LRU list.
1754  * Returns -EBUSY if the page was not on an LRU list.
1755  *
1756  * The returned page will have PageLRU() cleared.  If it was found on
1757  * the active list, it will have PageActive set.  If it was found on
1758  * the unevictable list, it will have the PageUnevictable bit set. That flag
1759  * may need to be cleared by the caller before letting the page go.
1760  *
1761  * The vmstat statistic corresponding to the list on which the page was
1762  * found will be decremented.
1763  *
1764  * Restrictions:
1765  *
1766  * (1) Must be called with an elevated refcount on the page. This is a
1767  *     fundamental difference from isolate_lru_pages (which is called
1768  *     without a stable reference).
1769  * (2) the lru_lock must not be held.
1770  * (3) interrupts must be enabled.
1771  */
1772 int isolate_lru_page(struct page *page)
1773 {
1774         int ret = -EBUSY;
1775
1776         VM_BUG_ON_PAGE(!page_count(page), page);
1777         WARN_RATELIMIT(PageTail(page), "trying to isolate tail page");
1778
1779         if (TestClearPageLRU(page)) {
1780                 pg_data_t *pgdat = page_pgdat(page);
1781                 struct lruvec *lruvec;
1782
1783                 get_page(page);
1784                 lruvec = mem_cgroup_page_lruvec(page, pgdat);
1785                 spin_lock_irq(&pgdat->lru_lock);
1786                 del_page_from_lru_list(page, lruvec, page_lru(page));
1787                 spin_unlock_irq(&pgdat->lru_lock);
1788                 ret = 0;
1789         }
1790
1791         return ret;
1792 }
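/*
 * Editorial sketch (not part of the original source): minimal usage of
 * isolate_lru_page() following the restrictions documented above.  The
 * surrounding context is hypothetical; only isolate_lru_page(),
 * putback_lru_page() and get_page()/put_page() are real helpers here.
 *
 *	get_page(page);                  // restriction (1): stable reference
 *	if (!isolate_lru_page(page)) {
 *		// page is off its LRU; PageActive/PageUnevictable may be set
 *		// ... operate on the page ...
 *		putback_lru_page(page);  // re-add it, drops the isolation ref
 *	}
 *	put_page(page);                  // drop the reference taken above
 */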
1793
1794 /*
1795  * A direct reclaimer may isolate SWAP_CLUSTER_MAX pages from the LRU list and
1796  * then get rescheduled. When there is a massive number of tasks doing page
1797  * allocation, such sleeping direct reclaimers may keep piling up on each CPU;
1798  * the LRU list then shrinks and is scanned faster than necessary, leading to
1799  * unnecessary swapping, thrashing and OOM.
1800  */
1801 static int too_many_isolated(struct pglist_data *pgdat, int file,
1802                 struct scan_control *sc)
1803 {
1804         unsigned long inactive, isolated;
1805
1806         if (current_is_kswapd())
1807                 return 0;
1808
1809         if (!writeback_throttling_sane(sc))
1810                 return 0;
1811
1812         if (file) {
1813                 inactive = node_page_state(pgdat, NR_INACTIVE_FILE);
1814                 isolated = node_page_state(pgdat, NR_ISOLATED_FILE);
1815         } else {
1816                 inactive = node_page_state(pgdat, NR_INACTIVE_ANON);
1817                 isolated = node_page_state(pgdat, NR_ISOLATED_ANON);
1818         }
1819
1820         /*
1821          * GFP_NOIO/GFP_NOFS callers are allowed to isolate more pages, so they
1822          * won't get blocked by normal direct-reclaimers, forming a circular
1823          * deadlock.
1824          */
1825         if ((sc->gfp_mask & (__GFP_IO | __GFP_FS)) == (__GFP_IO | __GFP_FS))
1826                 inactive >>= 3;
1827
1828         return isolated > inactive;
1829 }
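/*
 * Editorial worked example (not part of the original source), with made-up
 * numbers: with 80,000 inactive file pages on the node, a direct reclaimer
 * that can do both IO and FS work compares against 80,000 >> 3 = 10,000, so
 * it starts stalling once more than 10,000 file pages are isolated.  A
 * GFP_NOFS or GFP_NOIO reclaimer keeps the full 80,000 as its threshold and
 * is therefore allowed to isolate far more before being throttled, which is
 * what prevents the circular deadlock described above.
 */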
1830
1831 /*
1832  * This moves pages from @list to the corresponding LRU list.
1833  *
1834  * We move them the other way if the page is referenced by one or more
1835  * processes, from rmap.
1836  *
1837  * If the pages are mostly unmapped, the processing is fast and it is
1838  * appropriate to hold pgdat->lru_lock across the whole operation.  But if
1839  * the pages are mapped, the processing is slow (page_referenced()) so we
1840  * should drop pgdat->lru_lock around each page.  It's impossible to balance
1841  * this, so instead we remove the pages from the LRU while processing them.
1842  * It is safe to rely on PG_active against the non-LRU pages in here because
1843  * nobody will play with that bit on a non-LRU page.
1844  *
1845  * The downside is that we have to touch page->_refcount against each page.
1846  * But we had to alter page->flags anyway.
1847  *
1848  * Returns the number of pages moved to the given lruvec.
1849  */
1850
1851 static unsigned noinline_for_stack move_pages_to_lru(struct lruvec *lruvec,
1852                                                      struct list_head *list)
1853 {
1854         struct pglist_data *pgdat = lruvec_pgdat(lruvec);
1855         int nr_pages, nr_moved = 0;
1856         LIST_HEAD(pages_to_free);
1857         struct page *page;
1858         enum lru_list lru;
1859
1860         while (!list_empty(list)) {
1861                 page = lru_to_page(list);
1862                 VM_BUG_ON_PAGE(PageLRU(page), page);
1863                 if (unlikely(!page_evictable(page))) {
1864                         list_del(&page->lru);
1865                         spin_unlock_irq(&pgdat->lru_lock);
1866                         putback_lru_page(page);
1867                         spin_lock_irq(&pgdat->lru_lock);
1868                         continue;
1869                 }
1870                 lruvec = mem_cgroup_page_lruvec(page, pgdat);
1871
1872                 SetPageLRU(page);
1873                 lru = page_lru(page);
1874
1875                 nr_pages = hpage_nr_pages(page);
1876                 update_lru_size(lruvec, lru, page_zonenum(page), nr_pages);
1877                 list_move(&page->lru, &lruvec->lists[lru]);
1878
1879                 if (put_page_testzero(page)) {
1880                         __ClearPageLRU(page);
1881                         __ClearPageActive(page);
1882                         del_page_from_lru_list(page, lruvec, lru);
1883
1884                         if (unlikely(PageCompound(page))) {
1885                                 spin_unlock_irq(&pgdat->lru_lock);
1886                                 (*get_compound_page_dtor(page))(page);
1887                                 spin_lock_irq(&pgdat->lru_lock);
1888                         } else
1889                                 list_add(&page->lru, &pages_to_free);
1890                 } else {
1891                         nr_moved += nr_pages;
1892                 }
1893         }
1894
1895         /*
1896          * To save our caller's stack, now use input list for pages to free.
1897          */
1898         list_splice(&pages_to_free, list);
1899
1900         return nr_moved;
1901 }
1902
1903 /*
1904  * If a kernel thread (such as nfsd for loop-back mounts) services
1905  * a backing device by writing to the page cache it sets PF_LESS_THROTTLE.
1906  * In that case we should only throttle if the backing device it is
1907  * writing to is congested.  In other cases it is safe to throttle.
1908  */
1909 static int current_may_throttle(void)
1910 {
1911         return !(current->flags & PF_LESS_THROTTLE) ||
1912                 current->backing_dev_info == NULL ||
1913                 bdi_write_congested(current->backing_dev_info);
1914 }
1915
1916 /*
1917  * shrink_inactive_list() is a helper for shrink_node().  It returns the number
1918  * of reclaimed pages
1919  */
1920 static noinline_for_stack unsigned long
1921 shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
1922                      struct scan_control *sc, enum lru_list lru)
1923 {
1924         LIST_HEAD(page_list);
1925         unsigned long nr_scanned;
1926         unsigned long nr_reclaimed = 0;
1927         unsigned long nr_taken;
1928         struct reclaim_stat stat;
1929         int file = is_file_lru(lru);
1930         enum vm_event_item item;
1931         struct pglist_data *pgdat = lruvec_pgdat(lruvec);
1932         struct zone_reclaim_stat *reclaim_stat = &lruvec->reclaim_stat;
1933         bool stalled = false;
1934
1935         while (unlikely(too_many_isolated(pgdat, file, sc))) {
1936                 if (stalled)
1937                         return 0;
1938
1939                 /* wait a bit for the reclaimer. */
1940                 msleep(100);
1941                 stalled = true;
1942
1943                 /* We are about to die and free our memory. Return now. */
1944                 if (fatal_signal_pending(current))
1945                         return SWAP_CLUSTER_MAX;
1946         }
1947
1948         lru_add_drain();
1949
1950         spin_lock_irq(&pgdat->lru_lock);
1951
1952         nr_taken = isolate_lru_pages(nr_to_scan, lruvec, &page_list,
1953                                      &nr_scanned, sc, lru);
1954
1955         __mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, nr_taken);
1956         reclaim_stat->recent_scanned[file] += nr_taken;
1957
1958         item = current_is_kswapd() ? PGSCAN_KSWAPD : PGSCAN_DIRECT;
1959         if (!cgroup_reclaim(sc))
1960                 __count_vm_events(item, nr_scanned);
1961         __count_memcg_events(lruvec_memcg(lruvec), item, nr_scanned);
1962         spin_unlock_irq(&pgdat->lru_lock);
1963
1964         if (nr_taken == 0)
1965                 return 0;
1966
1967         nr_reclaimed = shrink_page_list(&page_list, pgdat, sc, &stat, false);
1968
1969         spin_lock_irq(&pgdat->lru_lock);
1970
1971         item = current_is_kswapd() ? PGSTEAL_KSWAPD : PGSTEAL_DIRECT;
1972         if (!cgroup_reclaim(sc))
1973                 __count_vm_events(item, nr_reclaimed);
1974         __count_memcg_events(lruvec_memcg(lruvec), item, nr_reclaimed);
1975         reclaim_stat->recent_rotated[0] += stat.nr_activate[0];
1976         reclaim_stat->recent_rotated[1] += stat.nr_activate[1];
1977
1978         move_pages_to_lru(lruvec, &page_list);
1979
1980         __mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, -nr_taken);
1981
1982         spin_unlock_irq(&pgdat->lru_lock);
1983
1984         mem_cgroup_uncharge_list(&page_list);
1985         free_unref_page_list(&page_list);
1986
1987         /*
1988          * If dirty pages are scanned that are not queued for IO, it
1989          * implies that flushers are not doing their job. This can
1990          * happen when memory pressure pushes dirty pages to the end of
1991          * the LRU before the dirty limits are breached and the dirty
1992          * data has expired. It can also happen when the proportion of
1993          * dirty pages grows not through writes but through memory
1994          * pressure reclaiming all the clean cache. And in some cases,
1995          * the flushers simply cannot keep up with the allocation
1996          * rate. Nudge the flusher threads in case they are asleep.
1997          */
1998         if (stat.nr_unqueued_dirty == nr_taken)
1999                 wakeup_flusher_threads(WB_REASON_VMSCAN);
2000
2001         sc->nr.dirty += stat.nr_dirty;
2002         sc->nr.congested += stat.nr_congested;
2003         sc->nr.unqueued_dirty += stat.nr_unqueued_dirty;
2004         sc->nr.writeback += stat.nr_writeback;
2005         sc->nr.immediate += stat.nr_immediate;
2006         sc->nr.taken += nr_taken;
2007         if (file)
2008                 sc->nr.file_taken += nr_taken;
2009
2010         trace_mm_vmscan_lru_shrink_inactive(pgdat->node_id,
2011                         nr_scanned, nr_reclaimed, &stat, sc->priority, file);
2012         return nr_reclaimed;
2013 }
2014
2015 static void shrink_active_list(unsigned long nr_to_scan,
2016                                struct lruvec *lruvec,
2017                                struct scan_control *sc,
2018                                enum lru_list lru)
2019 {
2020         unsigned long nr_taken;
2021         unsigned long nr_scanned;
2022         unsigned long vm_flags;
2023         LIST_HEAD(l_hold);      /* The pages which were snipped off */
2024         LIST_HEAD(l_active);
2025         LIST_HEAD(l_inactive);
2026         struct page *page;
2027         struct zone_reclaim_stat *reclaim_stat = &lruvec->reclaim_stat;
2028         unsigned nr_deactivate, nr_activate;
2029         unsigned nr_rotated = 0;
2030         int file = is_file_lru(lru);
2031         struct pglist_data *pgdat = lruvec_pgdat(lruvec);
2032
2033         lru_add_drain();
2034
2035         spin_lock_irq(&pgdat->lru_lock);
2036
2037         nr_taken = isolate_lru_pages(nr_to_scan, lruvec, &l_hold,
2038                                      &nr_scanned, sc, lru);
2039
2040         __mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, nr_taken);
2041         reclaim_stat->recent_scanned[file] += nr_taken;
2042
2043         __count_vm_events(PGREFILL, nr_scanned);
2044         __count_memcg_events(lruvec_memcg(lruvec), PGREFILL, nr_scanned);
2045
2046         spin_unlock_irq(&pgdat->lru_lock);
2047
2048         while (!list_empty(&l_hold)) {
2049                 cond_resched();
2050                 page = lru_to_page(&l_hold);
2051                 list_del(&page->lru);
2052
2053                 if (unlikely(!page_evictable(page))) {
2054                         putback_lru_page(page);
2055                         continue;
2056                 }
2057
2058                 if (unlikely(buffer_heads_over_limit)) {
2059                         if (page_has_private(page) && trylock_page(page)) {
2060                                 if (page_has_private(page))
2061                                         try_to_release_page(page, 0);
2062                                 unlock_page(page);
2063                         }
2064                 }
2065
2066                 if (page_referenced(page, 0, sc->target_mem_cgroup,
2067                                     &vm_flags)) {
2068                         nr_rotated += hpage_nr_pages(page);
2069                         /*
2070                          * Identify referenced, file-backed active pages and
2071                          * give them one more trip around the active list, so
2072                          * that executable code gets a better chance to stay in
2073                          * memory under moderate memory pressure.  Anon pages
2074                          * are not likely to be evicted by use-once streaming
2075                          * IO, plus JVM can create lots of anon VM_EXEC pages,
2076                          * so we ignore them here.
2077                          */
2078                         if ((vm_flags & VM_EXEC) && page_is_file_cache(page)) {
2079                                 list_add(&page->lru, &l_active);
2080                                 continue;
2081                         }
2082                 }
2083
2084                 ClearPageActive(page);  /* we are de-activating */
2085                 SetPageWorkingset(page);
2086                 list_add(&page->lru, &l_inactive);
2087         }
2088
2089         /*
2090          * Move pages back to the lru list.
2091          */
2092         spin_lock_irq(&pgdat->lru_lock);
2093         /*
2094          * Count referenced pages from currently used mappings as rotated,
2095          * even though only some of them are actually re-activated.  This
2096          * helps balance scan pressure between file and anonymous pages in
2097          * get_scan_count.
2098          */
2099         reclaim_stat->recent_rotated[file] += nr_rotated;
2100
2101         nr_activate = move_pages_to_lru(lruvec, &l_active);
2102         nr_deactivate = move_pages_to_lru(lruvec, &l_inactive);
2103         /* Keep all free pages in l_active list */
2104         list_splice(&l_inactive, &l_active);
2105
2106         __count_vm_events(PGDEACTIVATE, nr_deactivate);
2107         __count_memcg_events(lruvec_memcg(lruvec), PGDEACTIVATE, nr_deactivate);
2108
2109         __mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, -nr_taken);
2110         spin_unlock_irq(&pgdat->lru_lock);
2111
2112         mem_cgroup_uncharge_list(&l_active);
2113         free_unref_page_list(&l_active);
2114         trace_mm_vmscan_lru_shrink_active(pgdat->node_id, nr_taken, nr_activate,
2115                         nr_deactivate, nr_rotated, sc->priority, file);
2116 }
2117
2118 unsigned long reclaim_pages(struct list_head *page_list)
2119 {
2120         int nid = -1;
2121         unsigned long nr_reclaimed = 0;
2122         LIST_HEAD(node_page_list);
2123         struct reclaim_stat dummy_stat;
2124         struct page *page;
2125         struct scan_control sc = {
2126                 .gfp_mask = GFP_KERNEL,
2127                 .priority = DEF_PRIORITY,
2128                 .may_writepage = 1,
2129                 .may_unmap = 1,
2130                 .may_swap = 1,
2131         };
2132
2133         while (!list_empty(page_list)) {
2134                 page = lru_to_page(page_list);
2135                 if (nid == -1) {
2136                         nid = page_to_nid(page);
2137                         INIT_LIST_HEAD(&node_page_list);
2138                 }
2139
2140                 if (nid == page_to_nid(page)) {
2141                         ClearPageActive(page);
2142                         list_move(&page->lru, &node_page_list);
2143                         continue;
2144                 }
2145
2146                 nr_reclaimed += shrink_page_list(&node_page_list,
2147                                                 NODE_DATA(nid),
2148                                                 &sc, &dummy_stat, false);
2149                 while (!list_empty(&node_page_list)) {
2150                         page = lru_to_page(&node_page_list);
2151                         list_del(&page->lru);
2152                         putback_lru_page(page);
2153                 }
2154
2155                 nid = -1;
2156         }
2157
2158         if (!list_empty(&node_page_list)) {
2159                 nr_reclaimed += shrink_page_list(&node_page_list,
2160                                                 NODE_DATA(nid),
2161                                                 &sc, &dummy_stat, false);
2162                 while (!list_empty(&node_page_list)) {
2163                         page = lru_to_page(&node_page_list);
2164                         list_del(&page->lru);
2165                         putback_lru_page(page);
2166                 }
2167         }
2168
2169         return nr_reclaimed;
2170 }
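/*
 * Editorial sketch (not part of the original source): reclaim_pages() expects
 * a list of pages that were already isolated from their LRUs; the loop above
 * batches them per node before calling shrink_page_list().  A hypothetical
 * caller might feed it like this:
 *
 *	LIST_HEAD(page_list);
 *
 *	// for each candidate page, while holding a reference on it:
 *	if (!isolate_lru_page(page))
 *		list_add(&page->lru, &page_list);
 *	// ...
 *	nr_reclaimed = reclaim_pages(&page_list);
 *	// pages that could not be reclaimed were already put back on their LRUs
 */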
2171
2172 static unsigned long shrink_list(enum lru_list lru, unsigned long nr_to_scan,
2173                                  struct lruvec *lruvec, struct scan_control *sc)
2174 {
2175         if (is_active_lru(lru)) {
2176                 if (sc->may_deactivate & (1 << is_file_lru(lru)))
2177                         shrink_active_list(nr_to_scan, lruvec, sc, lru);
2178                 else
2179                         sc->skipped_deactivate = 1;
2180                 return 0;
2181         }
2182
2183         return shrink_inactive_list(nr_to_scan, lruvec, sc, lru);
2184 }
2185
2186 /*
2187  * The inactive anon list should be small enough that the VM never has
2188  * to do too much work.
2189  *
2190  * The inactive file list should be small enough to leave most memory
2191  * to the established workingset on the scan-resistant active list,
2192  * but large enough to avoid thrashing the aggregate readahead window.
2193  *
2194  * Both inactive lists should also be large enough that each inactive
2195  * page has a chance to be referenced again before it is reclaimed.
2196  *
2197  * If that fails and refaulting is observed, the inactive list grows.
2198  *
2199  * The inactive_ratio is the target ratio of ACTIVE to INACTIVE pages
2200  * on this LRU, maintained by the pageout code. An inactive_ratio
2201  * of 3 means 3:1 or 25% of the pages are kept on the inactive list.
2202  *
2203  * total     target    max
2204  * memory    ratio     inactive
2205  * -------------------------------------
2206  *   10MB       1         5MB
2207  *  100MB       1        50MB
2208  *    1GB       3       250MB
2209  *   10GB      10       0.9GB
2210  *  100GB      31         3GB
2211  *    1TB     101        10GB
2212  *   10TB     320        32GB
2213  */
2214 static bool inactive_is_low(struct lruvec *lruvec, enum lru_list inactive_lru)
2215 {
2216         enum lru_list active_lru = inactive_lru + LRU_ACTIVE;
2217         unsigned long inactive, active;
2218         unsigned long inactive_ratio;
2219         unsigned long gb;
2220
2221         inactive = lruvec_page_state(lruvec, NR_LRU_BASE + inactive_lru);
2222         active = lruvec_page_state(lruvec, NR_LRU_BASE + active_lru);
2223
2224         gb = (inactive + active) >> (30 - PAGE_SHIFT);
2225         if (gb)
2226                 inactive_ratio = int_sqrt(10 * gb);
2227         else
2228                 inactive_ratio = 1;
2229
2230         return inactive * inactive_ratio < active;
2231 }
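/*
 * Editorial worked example (not part of the original source), checking the
 * table above against the code: with 4KiB pages, 100GB on an inactive/active
 * pair is (100 << 30) >> 12 = 26,214,400 pages, so gb = 100 and
 * inactive_ratio = int_sqrt(10 * 100) = int_sqrt(1000) = 31, matching the
 * "100GB -> 31" row.  Below 1GB, gb is 0 and the ratio falls back to 1, i.e.
 * the inactive list is only considered low once it is smaller than the
 * active list.
 */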
2232
2233 enum scan_balance {
2234         SCAN_EQUAL,
2235         SCAN_FRACT,
2236         SCAN_ANON,
2237         SCAN_FILE,
2238 };
2239
2240 static void prepare_scan_count(pg_data_t *pgdat, struct scan_control *sc)
2241 {
2242         unsigned long file;
2243         struct lruvec *target_lruvec;
2244
2245         if (lru_gen_enabled())
2246                 return;
2247
2248         target_lruvec = mem_cgroup_lruvec(sc->target_mem_cgroup, pgdat);
2249
2250         /*
2251          * Target desirable inactive:active list ratios for the anon
2252          * and file LRU lists.
2253          */
2254         if (!sc->force_deactivate) {
2255                 unsigned long refaults;
2256
2257                 if (inactive_is_low(target_lruvec, LRU_INACTIVE_ANON))
2258                         sc->may_deactivate |= DEACTIVATE_ANON;
2259                 else
2260                         sc->may_deactivate &= ~DEACTIVATE_ANON;
2261
2262                 /*
2263                  * When refaults are being observed, it means a new
2264                  * workingset is being established. Deactivate to get
2265                  * rid of any stale active pages quickly.
2266                  */
2267                 refaults = lruvec_page_state(target_lruvec,
2268                                              WORKINGSET_ACTIVATE);
2269                 if (refaults != target_lruvec->refaults ||
2270                     inactive_is_low(target_lruvec, LRU_INACTIVE_FILE))
2271                         sc->may_deactivate |= DEACTIVATE_FILE;
2272                 else
2273                         sc->may_deactivate &= ~DEACTIVATE_FILE;
2274         } else
2275                 sc->may_deactivate = DEACTIVATE_ANON | DEACTIVATE_FILE;
2276
2277         /*
2278          * If we have plenty of inactive file pages that aren't
2279          * thrashing, try to reclaim those first before touching
2280          * anonymous pages.
2281          */
2282         file = lruvec_page_state(target_lruvec, NR_INACTIVE_FILE);
2283         if (file >> sc->priority && !(sc->may_deactivate & DEACTIVATE_FILE))
2284                 sc->cache_trim_mode = 1;
2285         else
2286                 sc->cache_trim_mode = 0;
2287
2288         /*
2289          * Prevent the reclaimer from falling into the cache trap: as
2290          * cache pages start out inactive, every cache fault will tip
2291          * the scan balance towards the file LRU.  And as the file LRU
2292          * shrinks, so does the window for rotation from references.
2293          * This means we have a runaway feedback loop where a tiny
2294          * thrashing file LRU becomes infinitely more attractive than
2295          * anon pages.  Try to detect this based on file LRU size.
2296          */
2297         if (!cgroup_reclaim(sc)) {
2298                 unsigned long total_high_wmark = 0;
2299                 unsigned long free, anon;
2300                 int z;
2301
2302                 free = sum_zone_node_page_state(pgdat->node_id, NR_FREE_PAGES);
2303                 file = node_page_state(pgdat, NR_ACTIVE_FILE) +
2304                            node_page_state(pgdat, NR_INACTIVE_FILE);
2305
2306                 for (z = 0; z < MAX_NR_ZONES; z++) {
2307                         struct zone *zone = &pgdat->node_zones[z];
2308
2309                         if (!managed_zone(zone))
2310                                 continue;
2311
2312                         total_high_wmark += high_wmark_pages(zone);
2313                 }
2314
2315                 /*
2316                  * Consider anon: if that's low too, this isn't a
2317                  * runaway file reclaim problem, but rather just
2318                  * extreme pressure. Reclaim as per usual then.
2319                  */
2320                 anon = node_page_state(pgdat, NR_INACTIVE_ANON);
2321
2322                 sc->file_is_tiny =
2323                         file + free <= total_high_wmark &&
2324                         !(sc->may_deactivate & DEACTIVATE_ANON) &&
2325                         anon >> sc->priority;
2326         }
2327 }
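/*
 * Editorial worked example (not part of the original source): at the default
 * scan priority (DEF_PRIORITY == 12), "file >> sc->priority" is non-zero once
 * the target lruvec has at least 4096 inactive file pages (16MiB with 4KiB
 * pages), so cache_trim_mode is enabled whenever that much file cache exists
 * and file pages are not currently being deactivated.  Similarly, the
 * "anon >> sc->priority" term in the file_is_tiny check requires at least
 * 4096 inactive anon pages before reclaim is forced onto anon exclusively.
 */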
2328
2329 /*
2330  * Determine how aggressively the anon and file LRU lists should be
2331  * scanned.  The relative value of each set of LRU lists is determined
2332  * by looking at the fraction of the pages scanned we did rotate back
2333  * onto the active list instead of evict.
2334  *
2335  * nr[0] = anon inactive pages to scan; nr[1] = anon active pages to scan
2336  * nr[2] = file inactive pages to scan; nr[3] = file active pages to scan
2337  */
2338 static void get_scan_count(struct lruvec *lruvec, struct scan_control *sc,
2339                            unsigned long *nr)
2340 {
2341         struct mem_cgroup *memcg = lruvec_memcg(lruvec);
2342         int swappiness = mem_cgroup_swappiness(memcg);
2343         struct zone_reclaim_stat *reclaim_stat = &lruvec->reclaim_stat;
2344         u64 fraction[ANON_AND_FILE];
2345         u64 denominator = 0;    /* gcc */
2346         struct pglist_data *pgdat = lruvec_pgdat(lruvec);
2347         unsigned long anon_prio, file_prio;
2348         enum scan_balance scan_balance;
2349         unsigned long anon, file;
2350         unsigned long ap, fp;
2351         enum lru_list lru;
2352
2353         /* If we have no swap space, do not bother scanning anon pages. */
2354         if (!sc->may_swap || mem_cgroup_get_nr_swap_pages(memcg) <= 0) {
2355                 scan_balance = SCAN_FILE;
2356                 goto out;
2357         }
2358
2359         /*
2360          * Global reclaim will swap to prevent OOM even with no
2361          * swappiness, but memcg users want to use this knob to
2362          * disable swapping for individual groups completely when
2363          * using the memory controller's swap limit feature would be
2364          * too expensive.
2365          */
2366         if (cgroup_reclaim(sc) && !swappiness) {
2367                 scan_balance = SCAN_FILE;
2368                 goto out;
2369         }
2370
2371         /*
2372          * Do not apply any pressure balancing cleverness when the
2373          * system is close to OOM, scan both anon and file equally
2374          * (unless the swappiness setting disagrees with swapping).
2375          */
2376         if (!sc->priority && swappiness) {
2377                 scan_balance = SCAN_EQUAL;
2378                 goto out;
2379         }
2380
2381         /*
2382          * If the system is almost out of file pages, force-scan anon.
2383          */
2384         if (sc->file_is_tiny) {
2385                 scan_balance = SCAN_ANON;
2386                 goto out;
2387         }
2388
2389         /*
2390          * If there is enough inactive page cache, we do not reclaim
2391          * anything from the anonymous working set right now.
2392          */
2393         if (sc->cache_trim_mode) {
2394                 scan_balance = SCAN_FILE;
2395                 goto out;
2396         }
2397
2398         scan_balance = SCAN_FRACT;
2399
2400         /*
2401          * With swappiness at 100, anonymous and file have the same priority.
2402          * This scanning priority is essentially the inverse of IO cost.
2403          */
2404         anon_prio = swappiness;
2405         file_prio = 200 - anon_prio;
2406
2407         /*
2408          * OK, so we have swap space and a fair amount of page cache
2409          * pages.  We use the recently rotated / recently scanned
2410          * ratios to determine how valuable each cache is.
2411          *
2412          * Because workloads change over time (and to avoid overflow)
2413          * we keep these statistics as a floating average, which ends
2414          * up weighing recent references more than old ones.
2415          *
2416          * anon in [0], file in [1]
2417          */
2418
2419         anon  = lruvec_lru_size(lruvec, LRU_ACTIVE_ANON, MAX_NR_ZONES) +
2420                 lruvec_lru_size(lruvec, LRU_INACTIVE_ANON, MAX_NR_ZONES);
2421         file  = lruvec_lru_size(lruvec, LRU_ACTIVE_FILE, MAX_NR_ZONES) +
2422                 lruvec_lru_size(lruvec, LRU_INACTIVE_FILE, MAX_NR_ZONES);
2423
2424         spin_lock_irq(&pgdat->lru_lock);
2425         if (unlikely(reclaim_stat->recent_scanned[0] > anon / 4)) {
2426                 reclaim_stat->recent_scanned[0] /= 2;
2427                 reclaim_stat->recent_rotated[0] /= 2;
2428         }
2429
2430         if (unlikely(reclaim_stat->recent_scanned[1] > file / 4)) {
2431                 reclaim_stat->recent_scanned[1] /= 2;
2432                 reclaim_stat->recent_rotated[1] /= 2;
2433         }
2434
2435         /*
2436          * The amount of pressure on anon vs file pages is inversely
2437          * proportional to the fraction of recently scanned pages on
2438          * each list that were recently referenced and in active use.
2439          */
2440         ap = anon_prio * (reclaim_stat->recent_scanned[0] + 1);
2441         ap /= reclaim_stat->recent_rotated[0] + 1;
2442
2443         fp = file_prio * (reclaim_stat->recent_scanned[1] + 1);
2444         fp /= reclaim_stat->recent_rotated[1] + 1;
2445         spin_unlock_irq(&pgdat->lru_lock);
2446
2447         fraction[0] = ap;
2448         fraction[1] = fp;
2449         denominator = ap + fp + 1;
2450 out:
2451         for_each_evictable_lru(lru) {
2452                 int file = is_file_lru(lru);
2453                 unsigned long lruvec_size;
2454                 unsigned long scan;
2455                 unsigned long protection;
2456
2457                 lruvec_size = lruvec_lru_size(lruvec, lru, sc->reclaim_idx);
2458                 protection = mem_cgroup_protection(memcg,
2459                                                    sc->memcg_low_reclaim);
2460
2461                 if (protection) {
2462                         /*
2463                          * Scale a cgroup's reclaim pressure by proportioning
2464                          * its current usage to its memory.low or memory.min
2465                          * setting.
2466                          *
2467                          * This is important, as otherwise scanning aggression
2468                          * becomes extremely binary -- from nothing as we
2469                          * approach the memory protection threshold, to totally
2470                          * nominal as we exceed it.  This results in requiring
2471                          * setting extremely liberal protection thresholds. It
2472                          * also means we simply get no protection at all if we
2473                          * set it too low, which is not ideal.
2474                          *
2475                          * If there is any protection in place, we reduce scan
2476                          * pressure by how much of the total memory used is
2477                          * within protection thresholds.
2478                          *
2479                          * There is one special case: in the first reclaim pass,
2480                          * we skip over all groups that are within their low
2481                          * protection. If that fails to reclaim enough pages to
2482                          * satisfy the reclaim goal, we come back and override
2483                          * the best-effort low protection. However, we still
2484                          * ideally want to honor how well-behaved groups are in
2485                          * that case instead of simply punishing them all
2486                          * equally. As such, we reclaim them based on how much
2487                          * memory they are using, reducing the scan pressure
2488                          * again by how much of the total memory used is under
2489                          * hard protection.
2490                          */
2491                         unsigned long cgroup_size = mem_cgroup_size(memcg);
2492
2493                         /* Avoid TOCTOU with earlier protection check */
2494                         cgroup_size = max(cgroup_size, protection);
2495
2496                         scan = lruvec_size - lruvec_size * protection /
2497                                 cgroup_size;
2498
2499                         /*
2500                          * Minimally target SWAP_CLUSTER_MAX pages to keep
2501                          * reclaim moving forwards, avoiding decrementing
2502                          * sc->priority further than desirable.
2503                          */
2504                         scan = max(scan, SWAP_CLUSTER_MAX);
2505                 } else {
2506                         scan = lruvec_size;
2507                 }
2508
2509                 scan >>= sc->priority;
2510
2511                 /*
2512                  * If the cgroup's already been deleted, make sure to
2513                  * scrape out the remaining cache.
2514                  */
2515                 if (!scan && !mem_cgroup_online(memcg))
2516                         scan = min(lruvec_size, SWAP_CLUSTER_MAX);
2517
2518                 switch (scan_balance) {
2519                 case SCAN_EQUAL:
2520                         /* Scan lists relative to size */
2521                         break;
2522                 case SCAN_FRACT:
2523                         /*
2524                          * Scan types proportional to swappiness and
2525                          * their relative recent reclaim efficiency.
2526                          * Make sure we don't miss the last page on
2527                          * the offlined memory cgroups because of a
2528                          * round-off error.
2529                          */
2530                         scan = mem_cgroup_online(memcg) ?
2531                                div64_u64(scan * fraction[file], denominator) :
2532                                DIV64_U64_ROUND_UP(scan * fraction[file],
2533                                                   denominator);
2534                         break;
2535                 case SCAN_FILE:
2536                 case SCAN_ANON:
2537                         /* Scan one type exclusively */
2538                         if ((scan_balance == SCAN_FILE) != file)
2539                                 scan = 0;
2540                         break;
2541                 default:
2542                         /* Look ma, no brain */
2543                         BUG();
2544                 }
2545
2546                 nr[lru] = scan;
2547         }
2548 }
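/*
 * Editorial worked example (not part of the original source), with made-up
 * statistics for the SCAN_FRACT case: swappiness = 60 gives anon_prio = 60
 * and file_prio = 140.  If the recently scanned/rotated counts are
 * 10000/1000 for anon and 10000/8000 for file, then ap ~= 60 * 10 = 600 and
 * fp ~= 140 * 1.25 = 175 (the code adds +1 to each term to avoid division by
 * zero), so roughly 600 / (600 + 175 + 1) ~= 77% of the scan pressure lands
 * on the anon lists -- file pages have been rotating back to the active list
 * far more often, so they are treated as the more valuable cache.
 */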
2549
2550 #ifdef CONFIG_LRU_GEN
2551
2552 /******************************************************************************
2553  *                          shorthand helpers
2554  ******************************************************************************/
2555
2556 #define LRU_REFS_FLAGS  (BIT(PG_referenced) | BIT(PG_workingset))
2557
2558 #define DEFINE_MAX_SEQ(lruvec)                                          \
2559         unsigned long max_seq = READ_ONCE((lruvec)->lrugen.max_seq)
2560
2561 #define DEFINE_MIN_SEQ(lruvec)                                          \
2562         unsigned long min_seq[ANON_AND_FILE] = {                        \
2563                 READ_ONCE((lruvec)->lrugen.min_seq[LRU_GEN_ANON]),      \
2564                 READ_ONCE((lruvec)->lrugen.min_seq[LRU_GEN_FILE]),      \
2565         }
2566
2567 #define for_each_gen_type_zone(gen, type, zone)                         \
2568         for ((gen) = 0; (gen) < MAX_NR_GENS; (gen)++)                   \
2569                 for ((type) = 0; (type) < ANON_AND_FILE; (type)++)      \
2570                         for ((zone) = 0; (zone) < MAX_NR_ZONES; (zone)++)
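/*
 * Editorial sketch (not part of the original source): how the helpers above
 * combine inside a function body.  DEFINE_MAX_SEQ()/DEFINE_MIN_SEQ() declare
 * READ_ONCE() snapshots of the generation counters, and
 * for_each_gen_type_zone() expands to a triple nested loop over every
 * (generation, type, zone) bucket:
 *
 *	int gen, type, zone;
 *	DEFINE_MAX_SEQ(lruvec);
 *	DEFINE_MIN_SEQ(lruvec);
 *
 *	for_each_gen_type_zone(gen, type, zone) {
 *		// visits MAX_NR_GENS * ANON_AND_FILE * MAX_NR_ZONES buckets
 *	}
 *	// max_seq and min_seq[LRU_GEN_ANON/LRU_GEN_FILE] hold the snapshots
 */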
2571
2572 static struct lruvec *get_lruvec(struct mem_cgroup *memcg, int nid)
2573 {
2574         struct pglist_data *pgdat = NODE_DATA(nid);
2575
2576 #ifdef CONFIG_MEMCG
2577         if (memcg) {
2578                 struct lruvec *lruvec = &memcg->nodeinfo[nid]->lruvec;
2579
2580                 /* for hotadd_new_pgdat() */
2581                 if (!lruvec->pgdat)
2582                         lruvec->pgdat = pgdat;
2583
2584                 return lruvec;
2585         }
2586 #endif
2587         VM_WARN_ON_ONCE(!mem_cgroup_disabled());
2588
2589         return pgdat ? &pgdat->__lruvec : NULL;
2590 }
2591
2592 static int get_swappiness(struct lruvec *lruvec, struct scan_control *sc)
2593 {
2594         struct mem_cgroup *memcg = lruvec_memcg(lruvec);
2595         /* struct pglist_data *pgdat = lruvec_pgdat(lruvec); */
2596
2597         /* FIXME: see a2a36488a61c + 26aa2d199d6f */
2598         if (/* !can_demote(pgdat->node_id, sc) && */
2599             mem_cgroup_get_nr_swap_pages(memcg) < MIN_LRU_BATCH)
2600                 return 0;
2601
2602         return mem_cgroup_swappiness(memcg);
2603 }
2604
2605 static int get_nr_gens(struct lruvec *lruvec, int type)
2606 {
2607         return lruvec->lrugen.max_seq - lruvec->lrugen.min_seq[type] + 1;
2608 }
2609
2610 static bool __maybe_unused seq_is_valid(struct lruvec *lruvec)
2611 {
2612         /* see the comment on lru_gen_struct */
2613         return get_nr_gens(lruvec, LRU_GEN_FILE) >= MIN_NR_GENS &&
2614                get_nr_gens(lruvec, LRU_GEN_FILE) <= get_nr_gens(lruvec, LRU_GEN_ANON) &&
2615                get_nr_gens(lruvec, LRU_GEN_ANON) <= MAX_NR_GENS;
2616 }
2617
2618 /******************************************************************************
2619  *                          mm_struct list
2620  ******************************************************************************/
2621
2622 static struct lru_gen_mm_list *get_mm_list(struct mem_cgroup *memcg)
2623 {
2624         static struct lru_gen_mm_list mm_list = {
2625                 .fifo = LIST_HEAD_INIT(mm_list.fifo),
2626                 .lock = __SPIN_LOCK_UNLOCKED(mm_list.lock),
2627         };
2628
2629 #ifdef CONFIG_MEMCG
2630         if (memcg)
2631                 return &memcg->mm_list;
2632 #endif
2633         VM_WARN_ON_ONCE(!mem_cgroup_disabled());
2634
2635         return &mm_list;
2636 }
2637
2638 void lru_gen_add_mm(struct mm_struct *mm)
2639 {
2640         int nid;
2641         struct mem_cgroup *memcg = get_mem_cgroup_from_mm(mm);
2642         struct lru_gen_mm_list *mm_list = get_mm_list(memcg);
2643
2644         VM_WARN_ON_ONCE(!list_empty(&mm->lru_gen.list));
2645 #ifdef CONFIG_MEMCG
2646         VM_WARN_ON_ONCE(mm->lru_gen.memcg);
2647         mm->lru_gen.memcg = memcg;
2648 #endif
2649         spin_lock(&mm_list->lock);
2650
2651         for_each_node_state(nid, N_MEMORY) {
2652                 struct lruvec *lruvec = get_lruvec(memcg, nid);
2653
2654                 if (!lruvec)
2655                         continue;
2656
2657                 /* the first addition since the last iteration */
2658                 if (lruvec->mm_state.tail == &mm_list->fifo)
2659                         lruvec->mm_state.tail = &mm->lru_gen.list;
2660         }
2661
2662         list_add_tail(&mm->lru_gen.list, &mm_list->fifo);
2663
2664         spin_unlock(&mm_list->lock);
2665 }
2666
2667 void lru_gen_del_mm(struct mm_struct *mm)
2668 {
2669         int nid;
2670         struct lru_gen_mm_list *mm_list;
2671         struct mem_cgroup *memcg = NULL;
2672
2673         if (list_empty(&mm->lru_gen.list))
2674                 return;
2675
2676 #ifdef CONFIG_MEMCG
2677         memcg = mm->lru_gen.memcg;
2678 #endif
2679         mm_list = get_mm_list(memcg);
2680
2681         spin_lock(&mm_list->lock);
2682
2683         for_each_node(nid) {
2684                 struct lruvec *lruvec = get_lruvec(memcg, nid);
2685
2686                 if (!lruvec)
2687                         continue;
2688
2689                 /* where the last iteration ended (exclusive) */
2690                 if (lruvec->mm_state.tail == &mm->lru_gen.list)
2691                         lruvec->mm_state.tail = lruvec->mm_state.tail->next;
2692
2693                 /* where the current iteration continues (inclusive) */
2694                 if (lruvec->mm_state.head != &mm->lru_gen.list)
2695                         continue;
2696
2697                 lruvec->mm_state.head = lruvec->mm_state.head->next;
2698                 /* the deletion ends the current iteration */
2699                 if (lruvec->mm_state.head == &mm_list->fifo)
2700                         WRITE_ONCE(lruvec->mm_state.seq, lruvec->mm_state.seq + 1);
2701         }
2702
2703         list_del_init(&mm->lru_gen.list);
2704
2705         spin_unlock(&mm_list->lock);
2706
2707 #ifdef CONFIG_MEMCG
2708         mem_cgroup_put(mm->lru_gen.memcg);
2709         mm->lru_gen.memcg = NULL;
2710 #endif
2711 }
2712
2713 #ifdef CONFIG_MEMCG
2714 void lru_gen_migrate_mm(struct mm_struct *mm)
2715 {
2716         struct mem_cgroup *memcg;
2717         struct task_struct *task = rcu_dereference_protected(mm->owner, true);
2718
2719         VM_WARN_ON_ONCE(task->mm != mm);
2720         lockdep_assert_held(&task->alloc_lock);
2721
2722         /* for mm_update_next_owner() */
2723         if (mem_cgroup_disabled())
2724                 return;
2725
2726         rcu_read_lock();
2727         memcg = mem_cgroup_from_task(task);
2728         rcu_read_unlock();
2729         if (memcg == mm->lru_gen.memcg)
2730                 return;
2731
2732         VM_WARN_ON_ONCE(!mm->lru_gen.memcg);
2733         VM_WARN_ON_ONCE(list_empty(&mm->lru_gen.list));
2734
2735         lru_gen_del_mm(mm);
2736         lru_gen_add_mm(mm);
2737 }
2738 #endif
2739
2740 /*
2741  * Bloom filters with m=1<<15, k=2 and the false positive rates of ~1/5 when
2742  * n=10,000 and ~1/2 when n=20,000, where, conventionally, m is the number of
2743  * bits in a bitmap, k is the number of hash functions and n is the number of
2744  * inserted items.
2745  *
2746  * Page table walkers use one of the two filters to reduce their search space.
2747  * To get rid of non-leaf entries that no longer have enough leaf entries, the
2748  * aging uses the double-buffering technique to flip to the other filter each
2749  * time it produces a new generation. For non-leaf entries that have enough
2750  * leaf entries, the aging carries them over to the next generation in
2751  * walk_pmd_range(); the eviction also reports them when walking the rmap
2752  * in lru_gen_look_around().
2753  *
2754  * For future optimizations:
2755  * 1. It's not necessary to keep both filters all the time. The spare one can be
2756  *    freed after the RCU grace period and reallocated if needed again.
2757  * 2. And when reallocating, it's worth scaling its size according to the number
2758  *    of inserted entries in the other filter, to reduce the memory overhead on
2759  *    small systems and false positives on large systems.
2760  * 3. Jenkins' hash function is an alternative to Knuth's.
2761  */
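/*
 * A rough sanity check of the rates above, using the standard approximation
 * p ~= (1 - e^(-k*n/m))^k with m = 1<<15 = 32768 and k = 2:
 *   n = 10,000: (1 - e^(-20000/32768))^2 ~= 0.46^2 ~= 0.21, i.e. ~1/5
 *   n = 20,000: (1 - e^(-40000/32768))^2 ~= 0.70^2 ~= 0.50, i.e. ~1/2
 */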
2762 #define BLOOM_FILTER_SHIFT      15
2763
2764 static inline int filter_gen_from_seq(unsigned long seq)
2765 {
2766         return seq % NR_BLOOM_FILTERS;
2767 }
2768
2769 static void get_item_key(void *item, int *key)
2770 {
2771         u32 hash = hash_ptr(item, BLOOM_FILTER_SHIFT * 2);
2772
2773         BUILD_BUG_ON(BLOOM_FILTER_SHIFT * 2 > BITS_PER_TYPE(u32));
2774
2775         key[0] = hash & (BIT(BLOOM_FILTER_SHIFT) - 1);
2776         key[1] = hash >> BLOOM_FILTER_SHIFT;
2777 }
2778
2779 static void reset_bloom_filter(struct lruvec *lruvec, unsigned long seq)
2780 {
2781         unsigned long *filter;
2782         int gen = filter_gen_from_seq(seq);
2783
2784         filter = lruvec->mm_state.filters[gen];
2785         if (filter) {
2786                 bitmap_clear(filter, 0, BIT(BLOOM_FILTER_SHIFT));
2787                 return;
2788         }
2789
2790         filter = bitmap_zalloc(BIT(BLOOM_FILTER_SHIFT),
2791                                __GFP_HIGH | __GFP_NOMEMALLOC | __GFP_NOWARN);
2792         WRITE_ONCE(lruvec->mm_state.filters[gen], filter);
2793 }
2794
2795 static void update_bloom_filter(struct lruvec *lruvec, unsigned long seq, void *item)
2796 {
2797         int key[2];
2798         unsigned long *filter;
2799         int gen = filter_gen_from_seq(seq);
2800
2801         filter = READ_ONCE(lruvec->mm_state.filters[gen]);
2802         if (!filter)
2803                 return;
2804
2805         get_item_key(item, key);
2806
2807         if (!test_bit(key[0], filter))
2808                 set_bit(key[0], filter);
2809         if (!test_bit(key[1], filter))
2810                 set_bit(key[1], filter);
2811 }
2812
2813 static bool test_bloom_filter(struct lruvec *lruvec, unsigned long seq, void *item)
2814 {
2815         int key[2];
2816         unsigned long *filter;
2817         int gen = filter_gen_from_seq(seq);
2818
2819         filter = READ_ONCE(lruvec->mm_state.filters[gen]);
2820         if (!filter)
2821                 return true;
2822
2823         get_item_key(item, key);
2824
2825         return test_bit(key[0], filter) && test_bit(key[1], filter);
2826 }
2827
2828 static void reset_mm_stats(struct lruvec *lruvec, struct lru_gen_mm_walk *walk, bool last)
2829 {
2830         int i;
2831         int hist;
2832
2833         lockdep_assert_held(&get_mm_list(lruvec_memcg(lruvec))->lock);
2834
2835         if (walk) {
2836                 hist = lru_hist_from_seq(walk->max_seq);
2837
2838                 for (i = 0; i < NR_MM_STATS; i++) {
2839                         WRITE_ONCE(lruvec->mm_state.stats[hist][i],
2840                                    lruvec->mm_state.stats[hist][i] + walk->mm_stats[i]);
2841                         walk->mm_stats[i] = 0;
2842                 }
2843         }
2844
2845         if (NR_HIST_GENS > 1 && last) {
2846                 hist = lru_hist_from_seq(lruvec->mm_state.seq + 1);
2847
2848                 for (i = 0; i < NR_MM_STATS; i++)
2849                         WRITE_ONCE(lruvec->mm_state.stats[hist][i], 0);
2850         }
2851 }
2852
2853 static bool should_skip_mm(struct mm_struct *mm, struct lru_gen_mm_walk *walk)
2854 {
2855         int type;
2856         unsigned long size = 0;
2857         struct pglist_data *pgdat = lruvec_pgdat(walk->lruvec);
2858         int key = pgdat->node_id % BITS_PER_TYPE(mm->lru_gen.bitmap);
2859
2860         if (!walk->force_scan && !test_bit(key, &mm->lru_gen.bitmap))
2861                 return true;
2862
2863         clear_bit(key, &mm->lru_gen.bitmap);
2864
2865         for (type = !walk->can_swap; type < ANON_AND_FILE; type++) {
2866                 size += type ? get_mm_counter(mm, MM_FILEPAGES) :
2867                                get_mm_counter(mm, MM_ANONPAGES) +
2868                                get_mm_counter(mm, MM_SHMEMPAGES);
2869         }
2870
2871         if (size < MIN_LRU_BATCH)
2872                 return true;
2873
2874         return !mmget_not_zero(mm);
2875 }
2876
2877 static bool iterate_mm_list(struct lruvec *lruvec, struct lru_gen_mm_walk *walk,
2878                             struct mm_struct **iter)
2879 {
2880         bool first = false;
2881         bool last = true;
2882         struct mm_struct *mm = NULL;
2883         struct mem_cgroup *memcg = lruvec_memcg(lruvec);
2884         struct lru_gen_mm_list *mm_list = get_mm_list(memcg);
2885         struct lru_gen_mm_state *mm_state = &lruvec->mm_state;
2886
2887         /*
2888          * There are four interesting cases for this page table walker:
2889          * 1. It tries to start a new iteration of mm_list with a stale max_seq;
2890          *    there is nothing left to do.
2891          * 2. It's the first of the current generation, and it needs to reset
2892          *    the Bloom filter for the next generation.
2893          * 3. It reaches the end of mm_list, and it needs to increment
2894          *    mm_state->seq; the iteration is done.
2895          * 4. It's the last of the current generation, and it needs to reset the
2896          *    mm stats counters for the next generation.
2897          */
2898         spin_lock(&mm_list->lock);
2899
2900         VM_WARN_ON_ONCE(mm_state->seq + 1 < walk->max_seq);
2901         VM_WARN_ON_ONCE(*iter && mm_state->seq > walk->max_seq);
2902         VM_WARN_ON_ONCE(*iter && !mm_state->nr_walkers);
2903
2904         if (walk->max_seq <= mm_state->seq) {
2905                 if (!*iter)
2906                         last = false;
2907                 goto done;
2908         }
2909
2910         if (!mm_state->nr_walkers) {
2911                 VM_WARN_ON_ONCE(mm_state->head && mm_state->head != &mm_list->fifo);
2912
2913                 mm_state->head = mm_list->fifo.next;
2914                 first = true;
2915         }
2916
2917         while (!mm && mm_state->head != &mm_list->fifo) {
2918                 mm = list_entry(mm_state->head, struct mm_struct, lru_gen.list);
2919
2920                 mm_state->head = mm_state->head->next;
2921
2922                 /* force scan for those added after the last iteration */
2923                 if (!mm_state->tail || mm_state->tail == &mm->lru_gen.list) {
2924                         mm_state->tail = mm_state->head;
2925                         walk->force_scan = true;
2926                 }
2927
2928                 if (should_skip_mm(mm, walk))
2929                         mm = NULL;
2930         }
2931
2932         if (mm_state->head == &mm_list->fifo)
2933                 WRITE_ONCE(mm_state->seq, mm_state->seq + 1);
2934 done:
2935         if (*iter && !mm)
2936                 mm_state->nr_walkers--;
2937         if (!*iter && mm)
2938                 mm_state->nr_walkers++;
2939
2940         if (mm_state->nr_walkers)
2941                 last = false;
2942
2943         if (*iter || last)
2944                 reset_mm_stats(lruvec, walk, last);
2945
2946         spin_unlock(&mm_list->lock);
2947
2948         if (mm && first)
2949                 reset_bloom_filter(lruvec, walk->max_seq + 1);
2950
2951         if (*iter)
2952                 mmput_async(*iter);
2953
2954         *iter = mm;
2955
2956         return last;
2957 }
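/*
 * Usage sketch, mirroring try_to_inc_max_seq() below: start with *iter set to
 * NULL, call this in a loop and walk each mm_struct it hands back, and stop
 * once *iter comes back NULL; a true return value means this walker may go on
 * to increment max_seq, false means max_seq was stale or other walkers are
 * still running.
 */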
2958
2959 static bool iterate_mm_list_nowalk(struct lruvec *lruvec, unsigned long max_seq)
2960 {
2961         bool success = false;
2962         struct mem_cgroup *memcg = lruvec_memcg(lruvec);
2963         struct lru_gen_mm_list *mm_list = get_mm_list(memcg);
2964         struct lru_gen_mm_state *mm_state = &lruvec->mm_state;
2965
2966         spin_lock(&mm_list->lock);
2967
2968         VM_WARN_ON_ONCE(mm_state->seq + 1 < max_seq);
2969
2970         if (max_seq > mm_state->seq && !mm_state->nr_walkers) {
2971                 VM_WARN_ON_ONCE(mm_state->head && mm_state->head != &mm_list->fifo);
2972
2973                 WRITE_ONCE(mm_state->seq, mm_state->seq + 1);
2974                 reset_mm_stats(lruvec, NULL, true);
2975                 success = true;
2976         }
2977
2978         spin_unlock(&mm_list->lock);
2979
2980         return success;
2981 }
2982
2983 /******************************************************************************
2984  *                          refault feedback loop
2985  ******************************************************************************/
2986
2987 /*
2988  * A feedback loop based on Proportional-Integral-Derivative (PID) controller.
2989  *
2990  * The P term is refaulted/(evicted+protected) from a tier in the generation
2991  * currently being evicted; the I term is the exponential moving average of the
2992  * P term over the generations previously evicted, using the smoothing factor
2993  * 1/2; the D term isn't supported.
2994  *
2995  * The setpoint (SP) is always the first tier of one type; the process variable
2996  * (PV) is either any tier of the other type or any other tier of the same
2997  * type.
2998  *
2999  * The error is the difference between the SP and the PV; the correction is to
3000  * turn off protection when SP>PV or turn on protection when SP<PV.
3001  *
3002  * For future optimizations:
3003  * 1. The D term may discount the other two terms over time so that long-lived
3004  *    generations can resist stale information.
3005  */
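/*
 * Illustrative numbers (hypothetical, not measured): suppose a tier has
 * avg_refaulted=400 and avg_total=1000 (I term 0.4), and the generation being
 * evicted saw refaulted=200 out of evicted+protected=1000 (P term 0.2).
 * Carrying the generation over in reset_ctrl_pos() halves both sums, giving
 * 300/1000, i.e. the I term moves to 0.3 per the 1/2 smoothing factor above.
 */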
3006 struct ctrl_pos {
3007         unsigned long refaulted;
3008         unsigned long total;
3009         int gain;
3010 };
3011
3012 static void read_ctrl_pos(struct lruvec *lruvec, int type, int tier, int gain,
3013                           struct ctrl_pos *pos)
3014 {
3015         struct lru_gen_struct *lrugen = &lruvec->lrugen;
3016         int hist = lru_hist_from_seq(lrugen->min_seq[type]);
3017
3018         pos->refaulted = lrugen->avg_refaulted[type][tier] +
3019                          atomic_long_read(&lrugen->refaulted[hist][type][tier]);
3020         pos->total = lrugen->avg_total[type][tier] +
3021                      atomic_long_read(&lrugen->evicted[hist][type][tier]);
3022         if (tier)
3023                 pos->total += lrugen->protected[hist][type][tier - 1];
3024         pos->gain = gain;
3025 }
3026
3027 static void reset_ctrl_pos(struct lruvec *lruvec, int type, bool carryover)
3028 {
3029         int hist, tier;
3030         struct lru_gen_struct *lrugen = &lruvec->lrugen;
3031         bool clear = carryover ? NR_HIST_GENS == 1 : NR_HIST_GENS > 1;
3032         unsigned long seq = carryover ? lrugen->min_seq[type] : lrugen->max_seq + 1;
3033
3034         lockdep_assert_held(&lruvec_pgdat(lruvec)->lru_lock);
3035
3036         if (!carryover && !clear)
3037                 return;
3038
3039         hist = lru_hist_from_seq(seq);
3040
3041         for (tier = 0; tier < MAX_NR_TIERS; tier++) {
3042                 if (carryover) {
3043                         unsigned long sum;
3044
3045                         sum = lrugen->avg_refaulted[type][tier] +
3046                               atomic_long_read(&lrugen->refaulted[hist][type][tier]);
3047                         WRITE_ONCE(lrugen->avg_refaulted[type][tier], sum / 2);
3048
3049                         sum = lrugen->avg_total[type][tier] +
3050                               atomic_long_read(&lrugen->evicted[hist][type][tier]);
3051                         if (tier)
3052                                 sum += lrugen->protected[hist][type][tier - 1];
3053                         WRITE_ONCE(lrugen->avg_total[type][tier], sum / 2);
3054                 }
3055
3056                 if (clear) {
3057                         atomic_long_set(&lrugen->refaulted[hist][type][tier], 0);
3058                         atomic_long_set(&lrugen->evicted[hist][type][tier], 0);
3059                         if (tier)
3060                                 WRITE_ONCE(lrugen->protected[hist][type][tier - 1], 0);
3061                 }
3062         }
3063 }
3064
3065 static bool positive_ctrl_err(struct ctrl_pos *sp, struct ctrl_pos *pv)
3066 {
3067         /*
3068          * Return true if the PV has a limited number of refaults or a lower
3069          * refaulted/total than the SP.
3070          */
3071         return pv->refaulted < MIN_LRU_BATCH ||
3072                pv->refaulted * (sp->total + MIN_LRU_BATCH) * sp->gain <=
3073                (sp->refaulted + 1) * pv->total * pv->gain;
3074 }
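/*
 * For instance (made-up numbers), with sp = {refaulted 100, total 1000,
 * gain 1} and pv = {refaulted 300, total 1000, gain 1}: pv->refaulted is not
 * below MIN_LRU_BATCH and 300 * (1000 + MIN_LRU_BATCH) > 101 * 1000, so this
 * returns false, i.e. SP < PV and protection is turned on per the comment
 * above.
 */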
3075
3076 /******************************************************************************
3077  *                          the aging
3078  ******************************************************************************/
3079
3080 /* promote pages accessed through page tables */
3081 static int page_update_gen(struct page *page, int gen)
3082 {
3083         unsigned long new_flags, old_flags = READ_ONCE(page->flags);
3084
3085         VM_WARN_ON_ONCE(gen >= MAX_NR_GENS);
3086         VM_WARN_ON_ONCE(!rcu_read_lock_held());
3087
3088         do {
3089                 /* lru_gen_del_page() has isolated this page? */
3090                 if (!(old_flags & LRU_GEN_MASK)) {
3091                         /* for shrink_page_list() */
3092                         new_flags = old_flags | BIT(PG_referenced);
3093                         continue;
3094                 }
3095
3096                 new_flags = old_flags & ~(LRU_GEN_MASK | LRU_REFS_MASK | LRU_REFS_FLAGS);
3097                 new_flags |= (gen + 1UL) << LRU_GEN_PGOFF;
3098         } while (!try_cmpxchg(&page->flags, &old_flags, new_flags));
3099
3100         return ((old_flags & LRU_GEN_MASK) >> LRU_GEN_PGOFF) - 1;
3101 }
3102
3103 /* protect pages accessed multiple times through file descriptors */
3104 static int page_inc_gen(struct lruvec *lruvec, struct page *page, bool reclaiming)
3105 {
3106         int type = page_is_file_cache(page);
3107         struct lru_gen_struct *lrugen = &lruvec->lrugen;
3108         int new_gen, old_gen = lru_gen_from_seq(lrugen->min_seq[type]);
3109         unsigned long new_flags, old_flags = READ_ONCE(page->flags);
3110
3111         VM_WARN_ON_ONCE_PAGE(!(old_flags & LRU_GEN_MASK), page);
3112
3113         do {
3114                 new_gen = ((old_flags & LRU_GEN_MASK) >> LRU_GEN_PGOFF) - 1;
3115                 /* page_update_gen() has promoted this page? */
3116                 if (new_gen >= 0 && new_gen != old_gen)
3117                         return new_gen;
3118
3119                 new_gen = (old_gen + 1) % MAX_NR_GENS;
3120
3121                 new_flags = old_flags & ~(LRU_GEN_MASK | LRU_REFS_MASK | LRU_REFS_FLAGS);
3122                 new_flags |= (new_gen + 1UL) << LRU_GEN_PGOFF;
3123                 /* for end_page_writeback() */
3124                 if (reclaiming)
3125                         new_flags |= BIT(PG_reclaim);
3126         } while (!try_cmpxchg(&page->flags, &old_flags, new_flags));
3127
3128         lru_gen_update_size(lruvec, page, old_gen, new_gen);
3129
3130         return new_gen;
3131 }
3132
3133 static void update_batch_size(struct lru_gen_mm_walk *walk, struct page *page,
3134                               int old_gen, int new_gen)
3135 {
3136         int type = page_is_file_cache(page);
3137         int zone = page_zonenum(page);
3138         int delta = hpage_nr_pages(page);
3139
3140         VM_WARN_ON_ONCE(old_gen >= MAX_NR_GENS);
3141         VM_WARN_ON_ONCE(new_gen >= MAX_NR_GENS);
3142
3143         walk->batched++;
3144
3145         walk->nr_pages[old_gen][type][zone] -= delta;
3146         walk->nr_pages[new_gen][type][zone] += delta;
3147 }
3148
3149 static void reset_batch_size(struct lruvec *lruvec, struct lru_gen_mm_walk *walk)
3150 {
3151         int gen, type, zone;
3152         struct lru_gen_struct *lrugen = &lruvec->lrugen;
3153
3154         walk->batched = 0;
3155
3156         for_each_gen_type_zone(gen, type, zone) {
3157                 enum lru_list lru = type * LRU_INACTIVE_FILE;
3158                 int delta = walk->nr_pages[gen][type][zone];
3159
3160                 if (!delta)
3161                         continue;
3162
3163                 walk->nr_pages[gen][type][zone] = 0;
3164                 WRITE_ONCE(lrugen->nr_pages[gen][type][zone],
3165                            lrugen->nr_pages[gen][type][zone] + delta);
3166
3167                 if (lru_gen_is_active(lruvec, gen))
3168                         lru += LRU_ACTIVE;
3169                 __update_lru_size(lruvec, lru, zone, delta);
3170         }
3171 }
3172
3173 static int should_skip_vma(unsigned long start, unsigned long end, struct mm_walk *args)
3174 {
3175         struct address_space *mapping;
3176         struct vm_area_struct *vma = args->vma;
3177         struct lru_gen_mm_walk *walk = args->private;
3178
3179         if (!vma_is_accessible(vma))
3180                 return true;
3181
3182         if (is_vm_hugetlb_page(vma))
3183                 return true;
3184
3185         if (vma->vm_flags & (VM_LOCKED | VM_SPECIAL | VM_SEQ_READ | VM_RAND_READ))
3186                 return true;
3187
3188         if (vma == get_gate_vma(vma->vm_mm))
3189                 return true;
3190
3191         if (vma_is_anonymous(vma))
3192                 return !walk->can_swap;
3193
3194         if (WARN_ON_ONCE(!vma->vm_file || !vma->vm_file->f_mapping))
3195                 return true;
3196
3197         mapping = vma->vm_file->f_mapping;
3198         if (mapping_unevictable(mapping))
3199                 return true;
3200
3201         if (shmem_mapping(mapping))
3202                 return !walk->can_swap;
3203
3204         /* to exclude special mappings like dax, etc. */
3205         return !mapping->a_ops->readpage;
3206 }
3207
3208 /*
3209  * Some userspace memory allocators map many single-page VMAs. Instead of
3210  * returning to the PGD table for each such VMA, finish an entire PMD
3211  * table to reduce zigzags and improve cache performance.
3212  */
3213 static bool get_next_vma(unsigned long mask, unsigned long size, struct mm_walk *args,
3214                          unsigned long *vm_start, unsigned long *vm_end)
3215 {
3216         unsigned long start = round_up(*vm_end, size);
3217         unsigned long end = (start | ~mask) + 1;
3218
3219         VM_WARN_ON_ONCE(mask & size);
3220         VM_WARN_ON_ONCE((start & mask) != (*vm_start & mask));
3221
3222         while (args->vma) {
3223                 if (start >= args->vma->vm_end) {
3224                         args->vma = args->vma->vm_next;
3225                         continue;
3226                 }
3227
3228                 if (end && end <= args->vma->vm_start)
3229                         return false;
3230
3231                 if (should_skip_vma(args->vma->vm_start, args->vma->vm_end, args)) {
3232                         args->vma = args->vma->vm_next;
3233                         continue;
3234                 }
3235
3236                 *vm_start = max(start, args->vma->vm_start);
3237                 *vm_end = min(end - 1, args->vma->vm_end - 1) + 1;
3238
3239                 return true;
3240         }
3241
3242         return false;
3243 }
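/*
 * Example (hypothetical addresses): with mask == PMD_MASK and
 * size == PAGE_SIZE on a 4KB-page, 2MB-PMD configuration, *vm_end ==
 * 0x7f0000201000 gives start == 0x7f0000201000 and end == 0x7f0000400000,
 * i.e. the walk resumes at the next page and stops at the end of the current
 * PMD range unless it runs out of VMAs first.
 */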
3244
3245 static unsigned long get_pte_pfn(pte_t pte, struct vm_area_struct *vma, unsigned long addr)
3246 {
3247         unsigned long pfn = pte_pfn(pte);
3248
3249         VM_WARN_ON_ONCE(addr < vma->vm_start || addr >= vma->vm_end);
3250
3251         if (!pte_present(pte) || is_zero_pfn(pfn))
3252                 return -1;
3253
3254         if (WARN_ON_ONCE(pte_devmap(pte) || pte_special(pte)))
3255                 return -1;
3256
3257         if (WARN_ON_ONCE(!pfn_valid(pfn)))
3258                 return -1;
3259
3260         return pfn;
3261 }
3262
3263 #if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_ARCH_HAS_NONLEAF_PMD_YOUNG)
3264 static unsigned long get_pmd_pfn(pmd_t pmd, struct vm_area_struct *vma, unsigned long addr)
3265 {
3266         unsigned long pfn = pmd_pfn(pmd);
3267
3268         VM_WARN_ON_ONCE(addr < vma->vm_start || addr >= vma->vm_end);
3269
3270         if (!pmd_present(pmd) || is_huge_zero_pmd(pmd))
3271                 return -1;
3272
3273         if (WARN_ON_ONCE(pmd_devmap(pmd)))
3274                 return -1;
3275
3276         if (WARN_ON_ONCE(!pfn_valid(pfn)))
3277                 return -1;
3278
3279         return pfn;
3280 }
3281 #endif
3282
3283 static struct page *get_pfn_page(unsigned long pfn, struct mem_cgroup *memcg,
3284                                  struct pglist_data *pgdat, bool can_swap)
3285 {
3286         struct page *page;
3287
3288         /* try to avoid unnecessary memory loads */
3289         if (pfn < pgdat->node_start_pfn || pfn >= pgdat_end_pfn(pgdat))
3290                 return NULL;
3291
3292         page = compound_head(pfn_to_page(pfn));
3293         if (page_to_nid(page) != pgdat->node_id)
3294                 return NULL;
3295
3296         if (page_memcg_rcu(page) != memcg)
3297                 return NULL;
3298
3299         /* file VMAs can contain anon pages from COW */
3300         if (!page_is_file_cache(page) && !can_swap)
3301                 return NULL;
3302
3303         return page;
3304 }
3305
3306 static bool suitable_to_scan(int total, int young)
3307 {
3308         int n = clamp_t(int, cache_line_size() / sizeof(pte_t), 2, 8);
3309
3310         /* suitable if the average number of young PTEs per cacheline is >=1 */
3311         return young * n >= total;
3312 }
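/*
 * For example, assuming 64-byte cache lines and 8-byte PTEs, n is clamped to
 * 8, so a fully scanned PTE table of 512 entries counts as suitable once at
 * least 64 of its PTEs were young.
 */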
3313
3314 static bool walk_pte_range(pmd_t *pmd, unsigned long start, unsigned long end,
3315                            struct mm_walk *args)
3316 {
3317         int i;
3318         pte_t *pte;
3319         spinlock_t *ptl;
3320         unsigned long addr;
3321         int total = 0;
3322         int young = 0;
3323         struct lru_gen_mm_walk *walk = args->private;
3324         struct mem_cgroup *memcg = lruvec_memcg(walk->lruvec);
3325         struct pglist_data *pgdat = lruvec_pgdat(walk->lruvec);
3326         int old_gen, new_gen = lru_gen_from_seq(walk->max_seq);
3327
3328         VM_WARN_ON_ONCE(pmd_leaf(*pmd));
3329
3330         ptl = pte_lockptr(args->mm, pmd);
3331         if (!spin_trylock(ptl))
3332                 return false;
3333
3334         arch_enter_lazy_mmu_mode();
3335
3336         pte = pte_offset_map(pmd, start & PMD_MASK);
3337 restart:
3338         for (i = pte_index(start), addr = start; addr != end; i++, addr += PAGE_SIZE) {
3339                 unsigned long pfn;
3340                 struct page *page;
3341
3342                 total++;
3343                 walk->mm_stats[MM_LEAF_TOTAL]++;
3344
3345                 pfn = get_pte_pfn(pte[i], args->vma, addr);
3346                 if (pfn == -1)
3347                         continue;
3348
3349                 if (!pte_young(pte[i])) {
3350                         walk->mm_stats[MM_LEAF_OLD]++;
3351                         continue;
3352                 }
3353
3354                 page = get_pfn_page(pfn, memcg, pgdat, walk->can_swap);
3355                 if (!page)
3356                         continue;
3357
3358                 if (!ptep_test_and_clear_young(args->vma, addr, pte + i))
3359                         VM_WARN_ON_ONCE(true);
3360
3361                 young++;
3362                 walk->mm_stats[MM_LEAF_YOUNG]++;
3363
3364                 if (pte_dirty(pte[i]) && !PageDirty(page) &&
3365                     !(PageAnon(page) && PageSwapBacked(page) &&
3366                       !PageSwapCache(page)))
3367                         set_page_dirty(page);
3368
3369                 old_gen = page_update_gen(page, new_gen);
3370                 if (old_gen >= 0 && old_gen != new_gen)
3371                         update_batch_size(walk, page, old_gen, new_gen);
3372         }
3373
3374         if (i < PTRS_PER_PTE && get_next_vma(PMD_MASK, PAGE_SIZE, args, &start, &end))
3375                 goto restart;
3376
3377         pte_unmap(pte);
3378
3379         arch_leave_lazy_mmu_mode();
3380         spin_unlock(ptl);
3381
3382         return suitable_to_scan(total, young);
3383 }
3384
3385 #if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_ARCH_HAS_NONLEAF_PMD_YOUNG)
3386 static void walk_pmd_range_locked(pud_t *pud, unsigned long next, struct vm_area_struct *vma,
3387                                   struct mm_walk *args, unsigned long *bitmap, unsigned long *start)
3388 {
3389         int i;
3390         pmd_t *pmd;
3391         spinlock_t *ptl;
3392         struct lru_gen_mm_walk *walk = args->private;
3393         struct mem_cgroup *memcg = lruvec_memcg(walk->lruvec);
3394         struct pglist_data *pgdat = lruvec_pgdat(walk->lruvec);
3395         int old_gen, new_gen = lru_gen_from_seq(walk->max_seq);
3396
3397         VM_WARN_ON_ONCE(pud_leaf(*pud));
3398
3399         /* try to batch at most 1+MIN_LRU_BATCH+1 entries */
3400         if (*start == -1) {
3401                 *start = next;
3402                 return;
3403         }
3404
3405         i = next == -1 ? 0 : pmd_index(next) - pmd_index(*start);
3406         if (i && i <= MIN_LRU_BATCH) {
3407                 __set_bit(i - 1, bitmap);
3408                 return;
3409         }
3410
3411         pmd = pmd_offset(pud, *start);
3412
3413         ptl = pmd_lockptr(args->mm, pmd);
3414         if (!spin_trylock(ptl))
3415                 goto done;
3416
3417         arch_enter_lazy_mmu_mode();
3418
3419         do {
3420                 unsigned long pfn;
3421                 struct page *page;
3422                 unsigned long addr = i ? (*start & PMD_MASK) + i * PMD_SIZE : *start;
3423
3424                 pfn = get_pmd_pfn(pmd[i], vma, addr);
3425                 if (pfn == -1)
3426                         goto next;
3427
3428                 if (!pmd_trans_huge(pmd[i])) {
3429                         if (IS_ENABLED(CONFIG_ARCH_HAS_NONLEAF_PMD_YOUNG))
3430                                 pmdp_test_and_clear_young(vma, addr, pmd + i);
3431                         goto next;
3432                 }
3433
3434                 page = get_pfn_page(pfn, memcg, pgdat, walk->can_swap);
3435                 if (!page)
3436                         goto next;
3437
3438                 if (!pmdp_test_and_clear_young(vma, addr, pmd + i))
3439                         goto next;
3440
3441                 walk->mm_stats[MM_LEAF_YOUNG]++;
3442
3443                 if (pmd_dirty(pmd[i]) && !PageDirty(page) &&
3444                     !(PageAnon(page) && PageSwapBacked(page) &&
3445                       !PageSwapCache(page)))
3446                         set_page_dirty(page);
3447
3448                 old_gen = page_update_gen(page, new_gen);
3449                 if (old_gen >= 0 && old_gen != new_gen)
3450                         update_batch_size(walk, page, old_gen, new_gen);
3451 next:
3452                 i = i > MIN_LRU_BATCH ? 0 : find_next_bit(bitmap, MIN_LRU_BATCH, i) + 1;
3453         } while (i <= MIN_LRU_BATCH);
3454
3455         arch_leave_lazy_mmu_mode();
3456         spin_unlock(ptl);
3457 done:
3458         *start = -1;
3459         bitmap_zero(bitmap, MIN_LRU_BATCH);
3460 }
3461 #else
3462 static void walk_pmd_range_locked(pud_t *pud, unsigned long next, struct vm_area_struct *vma,
3463                                   struct mm_walk *args, unsigned long *bitmap, unsigned long *start)
3464 {
3465 }
3466 #endif
3467
3468 static void walk_pmd_range(pud_t *pud, unsigned long start, unsigned long end,
3469                            struct mm_walk *args)
3470 {
3471         int i;
3472         pmd_t *pmd;
3473         unsigned long next;
3474         unsigned long addr;
3475         struct vm_area_struct *vma;
3476         unsigned long pos = -1;
3477         struct lru_gen_mm_walk *walk = args->private;
3478         unsigned long bitmap[BITS_TO_LONGS(MIN_LRU_BATCH)] = {};
3479
3480         VM_WARN_ON_ONCE(pud_leaf(*pud));
3481
3482         /*
3483          * Finish an entire PMD in two passes: the first only reaches to PTE
3484          * tables to avoid taking the PMD lock; the second, if necessary, takes
3485          * the PMD lock to clear the accessed bit in PMD entries.
3486          */
3487         pmd = pmd_offset(pud, start & PUD_MASK);
3488 restart:
3489         /* walk_pte_range() may call get_next_vma() */
3490         vma = args->vma;
3491         for (i = pmd_index(start), addr = start; addr != end; i++, addr = next) {
3492                 pmd_t val = pmd_read_atomic(pmd + i);
3493
3494                 /* for pmd_read_atomic() */
3495                 barrier();
3496
3497                 next = pmd_addr_end(addr, end);
3498
3499                 if (!pmd_present(val) || is_huge_zero_pmd(val)) {
3500                         walk->mm_stats[MM_LEAF_TOTAL]++;
3501                         continue;
3502                 }
3503
3504 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
3505                 if (pmd_trans_huge(val)) {
3506                         unsigned long pfn = pmd_pfn(val);
3507                         struct pglist_data *pgdat = lruvec_pgdat(walk->lruvec);
3508
3509                         walk->mm_stats[MM_LEAF_TOTAL]++;
3510
3511                         if (!pmd_young(val)) {
3512                                 walk->mm_stats[MM_LEAF_OLD]++;
3513                                 continue;
3514                         }
3515
3516                         /* try to avoid unnecessary memory loads */
3517                         if (pfn < pgdat->node_start_pfn || pfn >= pgdat_end_pfn(pgdat))
3518                                 continue;
3519
3520                         walk_pmd_range_locked(pud, addr, vma, args, bitmap, &pos);
3521                         continue;
3522                 }
3523 #endif
3524                 walk->mm_stats[MM_NONLEAF_TOTAL]++;
3525
3526 #ifdef CONFIG_ARCH_HAS_NONLEAF_PMD_YOUNG
3527                 if (!pmd_young(val))
3528                         continue;
3529
3530                 walk_pmd_range_locked(pud, addr, vma, args, bitmap, &pos);
3531 #endif
3532                 if (!walk->force_scan && !test_bloom_filter(walk->lruvec, walk->max_seq, pmd + i))
3533                         continue;
3534
3535                 walk->mm_stats[MM_NONLEAF_FOUND]++;
3536
3537                 if (!walk_pte_range(&val, addr, next, args))
3538                         continue;
3539
3540                 walk->mm_stats[MM_NONLEAF_ADDED]++;
3541
3542                 /* carry over to the next generation */
3543                 update_bloom_filter(walk->lruvec, walk->max_seq + 1, pmd + i);
3544         }
3545
3546         walk_pmd_range_locked(pud, -1, vma, args, bitmap, &pos);
3547
3548         if (i < PTRS_PER_PMD && get_next_vma(PUD_MASK, PMD_SIZE, args, &start, &end))
3549                 goto restart;
3550 }
3551
3552 static int walk_pud_range(p4d_t *p4d, unsigned long start, unsigned long end,
3553                           struct mm_walk *args)
3554 {
3555         int i;
3556         pud_t *pud;
3557         unsigned long addr;
3558         unsigned long next;
3559         struct lru_gen_mm_walk *walk = args->private;
3560
3561         VM_WARN_ON_ONCE(p4d_leaf(*p4d));
3562
3563         pud = pud_offset(p4d, start & P4D_MASK);
3564 restart:
3565         for (i = pud_index(start), addr = start; addr != end; i++, addr = next) {
3566                 pud_t val = READ_ONCE(pud[i]);
3567
3568                 next = pud_addr_end(addr, end);
3569
3570                 if (!pud_present(val) || WARN_ON_ONCE(pud_leaf(val)))
3571                         continue;
3572
3573                 walk_pmd_range(&val, addr, next, args);
3574
3575                 /* a racy check to curtail the waiting time */
3576                 if (wq_has_sleeper(&walk->lruvec->mm_state.wait))
3577                         return 1;
3578
3579                 if (need_resched() || walk->batched >= MAX_LRU_BATCH) {
3580                         end = (addr | ~PUD_MASK) + 1;
3581                         goto done;
3582                 }
3583         }
3584
3585         if (i < PTRS_PER_PUD && get_next_vma(P4D_MASK, PUD_SIZE, args, &start, &end))
3586                 goto restart;
3587
3588         end = round_up(end, P4D_SIZE);
3589 done:
3590         if (!end || !args->vma)
3591                 return 1;
3592
3593         walk->next_addr = max(end, args->vma->vm_start);
3594
3595         return -EAGAIN;
3596 }
3597
3598 static void walk_mm(struct lruvec *lruvec, struct mm_struct *mm, struct lru_gen_mm_walk *walk)
3599 {
3600         static const struct mm_walk_ops mm_walk_ops = {
3601                 .test_walk = should_skip_vma,
3602                 .p4d_entry = walk_pud_range,
3603         };
3604
3605         int err;
3606         struct mem_cgroup *memcg = lruvec_memcg(lruvec);
3607
3608         walk->next_addr = FIRST_USER_ADDRESS;
3609
3610         do {
3611                 err = -EBUSY;
3612
3613                 /* page_update_gen() requires stable page_memcg() */
3614                 if (!mem_cgroup_trylock_pages(memcg))
3615                         break;
3616
3617                 /* the caller might be holding the lock for write */
3618                 if (down_read_trylock(&mm->mmap_sem)) {
3619                         err = walk_page_range(mm, walk->next_addr, ULONG_MAX, &mm_walk_ops, walk);
3620
3621                         up_read(&mm->mmap_sem);
3622                 }
3623
3624                 mem_cgroup_unlock_pages();
3625
3626                 if (walk->batched) {
3627                         spin_lock_irq(&lruvec_pgdat(lruvec)->lru_lock);
3628                         reset_batch_size(lruvec, walk);
3629                         spin_unlock_irq(&lruvec_pgdat(lruvec)->lru_lock);
3630                 }
3631
3632                 cond_resched();
3633         } while (err == -EAGAIN);
3634 }
3635
3636 static struct lru_gen_mm_walk *set_mm_walk(struct pglist_data *pgdat)
3637 {
3638         struct lru_gen_mm_walk *walk = current->reclaim_state->mm_walk;
3639
3640         if (pgdat && current_is_kswapd()) {
3641                 VM_WARN_ON_ONCE(walk);
3642
3643                 walk = &pgdat->mm_walk;
3644         } else if (!pgdat && !walk) {
3645                 VM_WARN_ON_ONCE(current_is_kswapd());
3646
3647                 walk = kzalloc(sizeof(*walk), __GFP_HIGH | __GFP_NOMEMALLOC | __GFP_NOWARN);
3648         }
3649
3650         current->reclaim_state->mm_walk = walk;
3651
3652         return walk;
3653 }
3654
3655 static void clear_mm_walk(void)
3656 {
3657         struct lru_gen_mm_walk *walk = current->reclaim_state->mm_walk;
3658
3659         VM_WARN_ON_ONCE(walk && memchr_inv(walk->nr_pages, 0, sizeof(walk->nr_pages)));
3660         VM_WARN_ON_ONCE(walk && memchr_inv(walk->mm_stats, 0, sizeof(walk->mm_stats)));
3661
3662         current->reclaim_state->mm_walk = NULL;
3663
3664         if (!current_is_kswapd())
3665                 kfree(walk);
3666 }
3667
3668 static void inc_min_seq(struct lruvec *lruvec, int type)
3669 {
3670         struct lru_gen_struct *lrugen = &lruvec->lrugen;
3671
3672         reset_ctrl_pos(lruvec, type, true);
3673         WRITE_ONCE(lrugen->min_seq[type], lrugen->min_seq[type] + 1);
3674 }
3675
3676 static bool try_to_inc_min_seq(struct lruvec *lruvec, bool can_swap)
3677 {
3678         int gen, type, zone;
3679         bool success = false;
3680         struct lru_gen_struct *lrugen = &lruvec->lrugen;
3681         DEFINE_MIN_SEQ(lruvec);
3682
3683         VM_WARN_ON_ONCE(!seq_is_valid(lruvec));
3684
3685         /* find the oldest populated generation */
3686         for (type = !can_swap; type < ANON_AND_FILE; type++) {
3687                 while (min_seq[type] + MIN_NR_GENS <= lrugen->max_seq) {
3688                         gen = lru_gen_from_seq(min_seq[type]);
3689
3690                         for (zone = 0; zone < MAX_NR_ZONES; zone++) {
3691                                 if (!list_empty(&lrugen->lists[gen][type][zone]))
3692                                         goto next;
3693                         }
3694
3695                         min_seq[type]++;
3696                 }
3697 next:
3698                 ;
3699         }
3700
3701         /* see the comment on lru_gen_struct */
3702         if (can_swap) {
3703                 min_seq[LRU_GEN_ANON] = min(min_seq[LRU_GEN_ANON], min_seq[LRU_GEN_FILE]);
3704                 min_seq[LRU_GEN_FILE] = max(min_seq[LRU_GEN_ANON], lrugen->min_seq[LRU_GEN_FILE]);
3705         }
3706
3707         for (type = !can_swap; type < ANON_AND_FILE; type++) {
3708                 if (min_seq[type] == lrugen->min_seq[type])
3709                         continue;
3710
3711                 reset_ctrl_pos(lruvec, type, true);
3712                 WRITE_ONCE(lrugen->min_seq[type], min_seq[type]);
3713                 success = true;
3714         }
3715
3716         return success;
3717 }
3718
3719 static void inc_max_seq(struct lruvec *lruvec, bool can_swap)
3720 {
3721         int prev, next;
3722         int type, zone;
3723         struct lru_gen_struct *lrugen = &lruvec->lrugen;
3724
3725         spin_lock_irq(&lruvec_pgdat(lruvec)->lru_lock);
3726
3727         VM_WARN_ON_ONCE(!seq_is_valid(lruvec));
3728
3729         for (type = ANON_AND_FILE - 1; type >= 0; type--) {
3730                 if (get_nr_gens(lruvec, type) != MAX_NR_GENS)
3731                         continue;
3732
3733                 VM_WARN_ON_ONCE(type == LRU_GEN_FILE || can_swap);
3734
3735                 inc_min_seq(lruvec, type);
3736         }
3737
3738         /*
3739          * Update the active/inactive LRU sizes for compatibility. Both sides of
3740          * the current max_seq need to be covered, since max_seq+1 can overlap
3741          * with min_seq[LRU_GEN_ANON] if swapping is constrained. And if they do
3742          * overlap, cold/hot inversion happens.
3743          */
3744         prev = lru_gen_from_seq(lrugen->max_seq - 1);
3745         next = lru_gen_from_seq(lrugen->max_seq + 1);
3746
3747         for (type = 0; type < ANON_AND_FILE; type++) {
3748                 for (zone = 0; zone < MAX_NR_ZONES; zone++) {
3749                         enum lru_list lru = type * LRU_INACTIVE_FILE;
3750                         long delta = lrugen->nr_pages[prev][type][zone] -
3751                                      lrugen->nr_pages[next][type][zone];
3752
3753                         if (!delta)
3754                                 continue;
3755
3756                         __update_lru_size(lruvec, lru, zone, delta);
3757                         __update_lru_size(lruvec, lru + LRU_ACTIVE, zone, -delta);
3758                 }
3759         }
3760
3761         for (type = 0; type < ANON_AND_FILE; type++)
3762                 reset_ctrl_pos(lruvec, type, false);
3763
3764         /* make sure preceding modifications appear */
3765         smp_store_release(&lrugen->max_seq, lrugen->max_seq + 1);
3766
3767         spin_unlock_irq(&lruvec_pgdat(lruvec)->lru_lock);
3768 }
3769
3770 static bool try_to_inc_max_seq(struct lruvec *lruvec, unsigned long max_seq,
3771                                struct scan_control *sc, bool can_swap)
3772 {
3773         bool success;
3774         struct lru_gen_mm_walk *walk;
3775         struct mm_struct *mm = NULL;
3776         struct lru_gen_struct *lrugen = &lruvec->lrugen;
3777
3778         VM_WARN_ON_ONCE(max_seq > READ_ONCE(lrugen->max_seq));
3779
3780         /* see the comment in iterate_mm_list() */
3781         if (max_seq <= READ_ONCE(lruvec->mm_state.seq)) {
3782                 success = false;
3783                 goto done;
3784         }
3785
3786         /*
3787  * If the hardware doesn't automatically set the accessed bit, fall back
3788          * to lru_gen_look_around(), which only clears the accessed bit in a
3789          * handful of PTEs. Spreading the work out over a period of time usually
3790          * is less efficient, but it avoids bursty page faults.
3791          */
3792         if (!arch_has_hw_pte_young()) {
3793                 success = iterate_mm_list_nowalk(lruvec, max_seq);
3794                 goto done;
3795         }
3796
3797         walk = set_mm_walk(NULL);
3798         if (!walk) {
3799                 success = iterate_mm_list_nowalk(lruvec, max_seq);
3800                 goto done;
3801         }
3802
3803         walk->lruvec = lruvec;
3804         walk->max_seq = max_seq;
3805         walk->can_swap = can_swap;
3806         walk->force_scan = false;
3807
3808         do {
3809                 success = iterate_mm_list(lruvec, walk, &mm);
3810                 if (mm)
3811                         walk_mm(lruvec, mm, walk);
3812
3813                 cond_resched();
3814         } while (mm);
3815 done:
3816         if (!success) {
3817                 if (sc->priority <= DEF_PRIORITY - 2)
3818                         wait_event_killable(lruvec->mm_state.wait,
3819                                             max_seq < READ_ONCE(lrugen->max_seq));
3820
3821                 return max_seq < READ_ONCE(lrugen->max_seq);
3822         }
3823
3824         VM_WARN_ON_ONCE(max_seq != READ_ONCE(lrugen->max_seq));
3825
3826         inc_max_seq(lruvec, can_swap);
3827         /* either this sees any waiters or they will see updated max_seq */
3828         if (wq_has_sleeper(&lruvec->mm_state.wait))
3829                 wake_up_all(&lruvec->mm_state.wait);
3830
3831         return true;
3832 }
3833
3834 static bool should_run_aging(struct lruvec *lruvec, unsigned long max_seq, unsigned long *min_seq,
3835                              struct scan_control *sc, bool can_swap, unsigned long *nr_to_scan)
3836 {
3837         int gen, type, zone;
3838         unsigned long old = 0;
3839         unsigned long young = 0;
3840         unsigned long total = 0;
3841         struct lru_gen_struct *lrugen = &lruvec->lrugen;
3842         struct mem_cgroup *memcg = lruvec_memcg(lruvec);
3843
3844         for (type = !can_swap; type < ANON_AND_FILE; type++) {
3845                 unsigned long seq;
3846
3847                 for (seq = min_seq[type]; seq <= max_seq; seq++) {
3848                         unsigned long size = 0;
3849
3850                         gen = lru_gen_from_seq(seq);
3851
3852                         for (zone = 0; zone < MAX_NR_ZONES; zone++)
3853                                 size += max(READ_ONCE(lrugen->nr_pages[gen][type][zone]), 0L);
3854
3855                         total += size;
3856                         if (seq == max_seq)
3857                                 young += size;
3858                         else if (seq + MIN_NR_GENS == max_seq)
3859                                 old += size;
3860                 }
3861         }
3862
3863         /* try to scrape all its memory if this memcg was deleted */
3864         *nr_to_scan = mem_cgroup_online(memcg) ? (total >> sc->priority) : total;
3865
3866         /*
3867          * The aging tries to be lazy to reduce the overhead, while the eviction
3868          * stalls when the number of generations reaches MIN_NR_GENS. Hence, the
3869          * ideal number of generations is MIN_NR_GENS+1.
3870          */
3871         if (min_seq[!can_swap] + MIN_NR_GENS > max_seq)
3872                 return true;
3873         if (min_seq[!can_swap] + MIN_NR_GENS < max_seq)
3874                 return false;
3875
3876         /*
3877          * It's also ideal to spread pages out evenly, i.e., 1/(MIN_NR_GENS+1)
3878          * of the total number of pages for each generation. A reasonable range
3879          * for this average portion is [1/MIN_NR_GENS, 1/(MIN_NR_GENS+2)]. The
3880          * aging cares about the upper bound of hot pages, while the eviction
3881          * cares about the lower bound of cold pages.
3882          */
3883         if (young * MIN_NR_GENS > total)
3884                 return true;
3885         if (old * (MIN_NR_GENS + 2) < total)
3886                 return true;
3887
3888         return false;
3889 }
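/*
 * Concretely, with MIN_NR_GENS == 2 (a sketch of the bounds above, not a
 * guarantee for any particular workload): aging is requested when at most two
 * generations are left, skipped when four or more exist, and at the ideal
 * three it is triggered when the youngest generation holds over 1/2 of the
 * pages or the oldest holds under 1/4.
 */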
3890
3891 static void age_lruvec(struct lruvec *lruvec, struct scan_control *sc)
3892 {
3893         bool need_aging;
3894         unsigned long nr_to_scan;
3895         int swappiness = get_swappiness(lruvec, sc);
3896         struct mem_cgroup *memcg = lruvec_memcg(lruvec);
3897         DEFINE_MAX_SEQ(lruvec);
3898         DEFINE_MIN_SEQ(lruvec);
3899
3900         VM_WARN_ON_ONCE(sc->memcg_low_reclaim);
3901
3902         mem_cgroup_calculate_protection(NULL, memcg);
3903
3904         if (mem_cgroup_below_min(memcg))
3905                 return;
3906
3907         need_aging = should_run_aging(lruvec, max_seq, min_seq, sc, swappiness, &nr_to_scan);
3908         if (need_aging)
3909                 try_to_inc_max_seq(lruvec, max_seq, sc, swappiness);
3910 }
3911
3912 static void lru_gen_age_node(struct pglist_data *pgdat, struct scan_control *sc)
3913 {
3914         struct mem_cgroup *memcg;
3915
3916         VM_WARN_ON_ONCE(!current_is_kswapd());
3917
3918         sc->last_reclaimed = sc->nr_reclaimed;
3919
3920         /*
3921          * To reduce the chance of going into the aging path, which can be
3922          * costly, optimistically skip it if the flag below was cleared in the
3923          * eviction path. This improves the overall performance when multiple
3924          * memcgs are available.
3925          */
3926         if (!sc->memcgs_need_aging) {
3927                 sc->memcgs_need_aging = true;
3928                 return;
3929         }
3930
3931         set_mm_walk(pgdat);
3932
3933         memcg = mem_cgroup_iter(NULL, NULL, NULL);
3934         do {
3935                 struct lruvec *lruvec = mem_cgroup_lruvec(memcg, pgdat);
3936
3937                 age_lruvec(lruvec, sc);
3938
3939                 cond_resched();
3940         } while ((memcg = mem_cgroup_iter(NULL, memcg, NULL)));
3941
3942         clear_mm_walk();
3943 }
3944
3945 /*
3946  * This function exploits spatial locality when shrink_page_list() walks the
3947  * rmap. It scans the adjacent PTEs of a young PTE and promotes hot pages. If
3948  * the scan was done cacheline efficiently, it adds the PMD entry pointing to
3949  * the PTE table to the Bloom filter. This forms a feedback loop between the
3950  * eviction and the aging.
3951  */
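/*
 * With 4KB pages and MIN_LRU_BATCH of 64 (BITS_PER_LONG on 64-bit), the
 * window below spans up to 256KB around the young PTE, clamped to the
 * enclosing PMD range and the VMA, and recentered when the PTE sits near
 * either end of that range.
 */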
3952 void lru_gen_look_around(struct page_vma_mapped_walk *pvmw)
3953 {
3954         int i;
3955         pte_t *pte;
3956         unsigned long start;
3957         unsigned long end;
3958         unsigned long addr;
3959         struct lru_gen_mm_walk *walk;
3960         int young = 0;
3961         unsigned long bitmap[BITS_TO_LONGS(MIN_LRU_BATCH)] = {};
3962         struct page *page = pvmw->page;
3963         struct mem_cgroup *memcg = page_memcg(page);
3964         struct pglist_data *pgdat = page_pgdat(page);
3965         struct lruvec *lruvec = mem_cgroup_lruvec(memcg, pgdat);
3966         DEFINE_MAX_SEQ(lruvec);
3967         int old_gen, new_gen = lru_gen_from_seq(max_seq);
3968
3969         lockdep_assert_held(pvmw->ptl);
3970         VM_WARN_ON_ONCE_PAGE(PageLRU(page), page);
3971
3972         if (spin_is_contended(pvmw->ptl))
3973                 return;
3974
3975         /* avoid taking the LRU lock under the PTL when possible */
3976         walk = current->reclaim_state ? current->reclaim_state->mm_walk : NULL;
3977
3978         start = max(pvmw->address & PMD_MASK, pvmw->vma->vm_start);
3979         end = min(pvmw->address | ~PMD_MASK, pvmw->vma->vm_end - 1) + 1;
3980
3981         if (end - start > MIN_LRU_BATCH * PAGE_SIZE) {
3982                 if (pvmw->address - start < MIN_LRU_BATCH * PAGE_SIZE / 2)
3983                         end = start + MIN_LRU_BATCH * PAGE_SIZE;
3984                 else if (end - pvmw->address < MIN_LRU_BATCH * PAGE_SIZE / 2)
3985                         start = end - MIN_LRU_BATCH * PAGE_SIZE;
3986                 else {
3987                         start = pvmw->address - MIN_LRU_BATCH * PAGE_SIZE / 2;
3988                         end = pvmw->address + MIN_LRU_BATCH * PAGE_SIZE / 2;
3989                 }
3990         }
3991
3992         pte = pvmw->pte - (pvmw->address - start) / PAGE_SIZE;
3993
3994         rcu_read_lock();
3995         arch_enter_lazy_mmu_mode();
3996
3997         for (i = 0, addr = start; addr != end; i++, addr += PAGE_SIZE) {
3998                 unsigned long pfn;
3999
4000                 pfn = get_pte_pfn(pte[i], pvmw->vma, addr);
4001                 if (pfn == -1)
4002                         continue;
4003
4004                 if (!pte_young(pte[i]))
4005                         continue;
4006
4007                 page = get_pfn_page(pfn, memcg, pgdat, !walk || walk->can_swap);
4008                 if (!page)
4009                         continue;
4010
4011                 if (!ptep_test_and_clear_young(pvmw->vma, addr, pte + i))
4012                         VM_WARN_ON_ONCE(true);
4013
4014                 young++;
4015
4016                 if (pte_dirty(pte[i]) && !PageDirty(page) &&
4017                     !(PageAnon(page) && PageSwapBacked(page) &&
4018                       !PageSwapCache(page)))
4019                         set_page_dirty(page);
4020
4021                 old_gen = page_lru_gen(page);
4022                 if (old_gen < 0)
4023                         SetPageReferenced(page);
4024                 else if (old_gen != new_gen)
4025                         __set_bit(i, bitmap);
4026         }
4027
4028         arch_leave_lazy_mmu_mode();
4029         rcu_read_unlock();
4030
4031         /* feedback from rmap walkers to page table walkers */
4032         if (suitable_to_scan(i, young))
4033                 update_bloom_filter(lruvec, max_seq, pvmw->pmd);
4034
4035         if (!walk && bitmap_weight(bitmap, MIN_LRU_BATCH) < PAGEVEC_SIZE) {
4036                 for_each_set_bit(i, bitmap, MIN_LRU_BATCH) {
4037                         page = pte_page(pte[i]);
4038                         activate_page(page);
4039                 }
4040                 return;
4041         }
4042
4043         /* page_update_gen() requires stable page_memcg() */
4044         if (!mem_cgroup_trylock_pages(memcg))
4045                 return;
4046
4047         if (!walk) {
4048                 spin_lock_irq(&lruvec_pgdat(lruvec)->lru_lock);
4049                 new_gen = lru_gen_from_seq(lruvec->lrugen.max_seq);
4050         }
4051
4052         for_each_set_bit(i, bitmap, MIN_LRU_BATCH) {
4053                 page = compound_head(pte_page(pte[i]));
4054                 if (page_memcg_rcu(page) != memcg)
4055                         continue;
4056
4057                 old_gen = page_update_gen(page, new_gen);
4058                 if (old_gen < 0 || old_gen == new_gen)
4059                         continue;
4060
4061                 if (walk)
4062                         update_batch_size(walk, page, old_gen, new_gen);
4063                 else
4064                         lru_gen_update_size(lruvec, page, old_gen, new_gen);
4065         }
4066
4067         if (!walk)
4068                 spin_unlock_irq(&lruvec_pgdat(lruvec)->lru_lock);
4069
4070         mem_cgroup_unlock_pages();
4071 }
4072
4073 /******************************************************************************
4074  *                          the eviction
4075  ******************************************************************************/
4076
4077 static bool sort_page(struct lruvec *lruvec, struct page *page, int tier_idx)
4078 {
4079         bool success;
4080         int gen = page_lru_gen(page);
4081         int type = page_is_file_cache(page);
4082         int zone = page_zonenum(page);
4083         int delta = hpage_nr_pages(page);
4084         int refs = page_lru_refs(page);
4085         int tier = lru_tier_from_refs(refs);
4086         struct lru_gen_struct *lrugen = &lruvec->lrugen;
4087
4088         VM_WARN_ON_ONCE_PAGE(gen >= MAX_NR_GENS, page);
4089
4090         /* unevictable */
4091         if (!page_evictable(page)) {
4092                 success = lru_gen_del_page(lruvec, page, true);
4093                 VM_WARN_ON_ONCE_PAGE(!success, page);
4094                 SetPageUnevictable(page);
4095                 add_page_to_lru_list(page, lruvec, LRU_UNEVICTABLE);
4096                 __count_vm_events(UNEVICTABLE_PGCULLED, delta);
4097                 return true;
4098         }
4099
4100         /* dirty lazyfree */
4101         if (type == LRU_GEN_FILE && PageAnon(page) && PageDirty(page)) {
4102                 enum lru_list lru = page_lru_base_type(page);
4103
4104                 success = lru_gen_del_page(lruvec, page, true);
4105                 VM_WARN_ON_ONCE_PAGE(!success, page);
4106                 SetPageSwapBacked(page);
4107                 add_page_to_lru_list_tail(page, lruvec, lru);
4108                 return true;
4109         }
4110
4111         /* promoted */
4112         if (gen != lru_gen_from_seq(lrugen->min_seq[type])) {
4113                 list_move(&page->lru, &lrugen->lists[gen][type][zone]);
4114                 return true;
4115         }
4116
4117         /* protected */
4118         if (tier > tier_idx) {
4119                 int hist = lru_hist_from_seq(lrugen->min_seq[type]);
4120
4121                 gen = page_inc_gen(lruvec, page, false);
4122                 list_move_tail(&page->lru, &lrugen->lists[gen][type][zone]);
4123
4124                 WRITE_ONCE(lrugen->protected[hist][type][tier - 1],
4125                            lrugen->protected[hist][type][tier - 1] + delta);
4126                 __mod_lruvec_state(lruvec, WORKINGSET_ACTIVATE, delta);
4127                 return true;
4128         }
4129
4130         /* waiting for writeback */
4131         if (PageLocked(page) || PageWriteback(page) ||
4132             (type == LRU_GEN_FILE && PageDirty(page))) {
4133                 gen = page_inc_gen(lruvec, page, true);
4134                 list_move(&page->lru, &lrugen->lists[gen][type][zone]);
4135                 return true;
4136         }
4137
4138         return false;
4139 }
4140
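/*
 * Try to take a page off its generation list for shrink_page_list(). The
 * page must pass the may_unmap and swap/IO constraints in scan_control,
 * survive a reference grab (it may race with release_pages()) and
 * TestClearPageLRU() (it may race with another isolation). On success the
 * reference and reclaim bits are cleared for shrink_page_list() and the
 * caller owns the extra reference.
 */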
4141 static bool isolate_page(struct lruvec *lruvec, struct page *page, struct scan_control *sc)
4142 {
4143         bool success;
4144
4145         /* unmapping inhibited */
4146         if (!sc->may_unmap && page_mapped(page))
4147                 return false;
4148
4149         /* swapping inhibited */
4150         if (!(sc->may_writepage && (sc->gfp_mask & __GFP_IO)) &&
4151             (PageDirty(page) ||
4152              (PageAnon(page) && !PageSwapCache(page))))
4153                 return false;
4154
4155         /* raced with release_pages() */
4156         if (!get_page_unless_zero(page))
4157                 return false;
4158
4159         /* raced with another isolation */
4160         if (!TestClearPageLRU(page)) {
4161                 put_page(page);
4162                 return false;
4163         }
4164
4165         /* see the comment on MAX_NR_TIERS */
4166         if (!PageReferenced(page))
4167                 set_mask_bits(&page->flags, LRU_REFS_MASK | LRU_REFS_FLAGS, 0);
4168
4169         /* for shrink_page_list() */
4170         ClearPageReclaim(page);
4171         ClearPageReferenced(page);
4172
4173         success = lru_gen_del_page(lruvec, page, true);
4174         VM_WARN_ON_ONCE_PAGE(!success, page);
4175
4176         return true;
4177 }
4178
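/*
 * Scan pages from the tail of the oldest generation of @type, walking zones
 * from sc->reclaim_idx downwards. Each page is either sorted back onto the
 * LRU by sort_page(), isolated onto @list, or skipped and counted as
 * PGSCAN_SKIP. Scanning stops after MAX_LRU_BATCH pages, once MIN_LRU_BATCH
 * pages have been isolated, or once a zone has skipped MIN_LRU_BATCH pages.
 * The return value is the number of pages scanned, or 0 when nothing was
 * isolated and the scan budget was not exhausted (see the comment above the
 * return statement).
 */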
4179 static int scan_pages(struct lruvec *lruvec, struct scan_control *sc,
4180                       int type, int tier, struct list_head *list)
4181 {
4182         int gen, zone;
4183         enum vm_event_item item;
4184         int sorted = 0;
4185         int scanned = 0;
4186         int isolated = 0;
4187         int remaining = MAX_LRU_BATCH;
4188         struct lru_gen_struct *lrugen = &lruvec->lrugen;
4189         struct mem_cgroup *memcg = lruvec_memcg(lruvec);
4190
4191         VM_WARN_ON_ONCE(!list_empty(list));
4192
4193         if (get_nr_gens(lruvec, type) == MIN_NR_GENS)
4194                 return 0;
4195
4196         gen = lru_gen_from_seq(lrugen->min_seq[type]);
4197
4198         for (zone = sc->reclaim_idx; zone >= 0; zone--) {
4199                 LIST_HEAD(moved);
4200                 int skipped = 0;
4201                 struct list_head *head = &lrugen->lists[gen][type][zone];
4202
4203                 while (!list_empty(head)) {
4204                         struct page *page = lru_to_page(head);
4205                         int delta = hpage_nr_pages(page);
4206
4207                         VM_WARN_ON_ONCE_PAGE(PageUnevictable(page), page);
4208                         VM_WARN_ON_ONCE_PAGE(PageActive(page), page);
4209                         VM_WARN_ON_ONCE_PAGE(page_is_file_cache(page) != type, page);
4210                         VM_WARN_ON_ONCE_PAGE(page_zonenum(page) != zone, page);
4211
4212                         scanned += delta;
4213
4214                         if (sort_page(lruvec, page, tier))
4215                                 sorted += delta;
4216                         else if (isolate_page(lruvec, page, sc)) {
4217                                 list_add(&page->lru, list);
4218                                 isolated += delta;
4219                         } else {
4220                                 list_move(&page->lru, &moved);
4221                                 skipped += delta;
4222                         }
4223
4224                         if (!--remaining || max(isolated, skipped) >= MIN_LRU_BATCH)
4225                                 break;
4226                 }
4227
4228                 if (skipped) {
4229                         list_splice(&moved, head);
4230                         __count_zid_vm_events(PGSCAN_SKIP, zone, skipped);
4231                 }
4232
4233                 if (!remaining || isolated >= MIN_LRU_BATCH)
4234                         break;
4235         }
4236
4237         item = current_is_kswapd() ? PGSCAN_KSWAPD : PGSCAN_DIRECT;
4238         if (!cgroup_reclaim(sc)) {
4239                 __count_vm_events(item, isolated);
4240                 __count_vm_events(PGREFILL, sorted);
4241         }
4242         __count_memcg_events(memcg, item, isolated);
4243         __count_memcg_events(memcg, PGREFILL, sorted);
4244
4245         /*
4246          * There might not be eligible pages due to reclaim_idx, may_unmap and
4247          * may_writepage. Check the remaining scan budget to prevent a livelock
4248          * when no progress is being made.
4249          */
4250         return isolated || !remaining ? scanned : 0;
4251 }
4252
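/*
 * Pick the last tier of @type that should still be evicted: tiers above the
 * returned index have refaulted often enough, relative to the first tier
 * and with a 1:2 gain, that sort_page() will protect them rather than let
 * them be evicted.
 */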
4253 static int get_tier_idx(struct lruvec *lruvec, int type)
4254 {
4255         int tier;
4256         struct ctrl_pos sp, pv;
4257
4258         /*
4259          * To leave a margin for fluctuations, use a larger gain factor (1:2).
4260          * This value is chosen because any other tier would have at least twice
4261          * as many refaults as the first tier.
4262          */
4263         read_ctrl_pos(lruvec, type, 0, 1, &sp);
4264         for (tier = 1; tier < MAX_NR_TIERS; tier++) {
4265                 read_ctrl_pos(lruvec, type, tier, 2, &pv);
4266                 if (!positive_ctrl_err(&sp, &pv))
4267                         break;
4268         }
4269
4270         return tier - 1;
4271 }
4272
4273 static int get_type_to_scan(struct lruvec *lruvec, int swappiness, int *tier_idx)
4274 {
4275         int type, tier;
4276         struct ctrl_pos sp, pv;
4277         int gain[ANON_AND_FILE] = { swappiness, 200 - swappiness };
4278
4279         /*
4280          * Compare the first tier of anon with that of file to determine which
4281          * type to scan. Also need to compare other tiers of the selected type
4282          * with the first tier of the other type to determine the last tier (of
4283          * the selected type) to evict.
4284          */
4285         read_ctrl_pos(lruvec, LRU_GEN_ANON, 0, gain[LRU_GEN_ANON], &sp);
4286         read_ctrl_pos(lruvec, LRU_GEN_FILE, 0, gain[LRU_GEN_FILE], &pv);
4287         type = positive_ctrl_err(&sp, &pv);
4288
4289         read_ctrl_pos(lruvec, !type, 0, gain[!type], &sp);
4290         for (tier = 1; tier < MAX_NR_TIERS; tier++) {
4291                 read_ctrl_pos(lruvec, type, tier, gain[type], &pv);
4292                 if (!positive_ctrl_err(&sp, &pv))
4293                         break;
4294         }
4295
4296         *tier_idx = tier - 1;
4297
4298         return type;
4299 }
4300
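/*
 * Pick a type (anon or file) and a tier cutoff, then scan it. If the first
 * attempt isolates nothing, the other type is tried once; with swappiness 0
 * only file pages are considered at all.
 */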
4301 static int isolate_pages(struct lruvec *lruvec, struct scan_control *sc, int swappiness,
4302                          int *type_scanned, struct list_head *list)
4303 {
4304         int i;
4305         int type;
4306         int scanned;
4307         int tier = -1;
4308         DEFINE_MIN_SEQ(lruvec);
4309
4310         /*
4311          * Try to make the obvious choice first. When anon and file are both
4312          * available from the same generation, interpret swappiness 1 as file
4313          * first and 200 as anon first.
4314          */
4315         if (!swappiness)
4316                 type = LRU_GEN_FILE;
4317         else if (min_seq[LRU_GEN_ANON] < min_seq[LRU_GEN_FILE])
4318                 type = LRU_GEN_ANON;
4319         else if (swappiness == 1)
4320                 type = LRU_GEN_FILE;
4321         else if (swappiness == 200)
4322                 type = LRU_GEN_ANON;
4323         else
4324                 type = get_type_to_scan(lruvec, swappiness, &tier);
4325
4326         for (i = !swappiness; i < ANON_AND_FILE; i++) {
4327                 if (tier < 0)
4328                         tier = get_tier_idx(lruvec, type);
4329
4330                 scanned = scan_pages(lruvec, sc, type, tier, list);
4331                 if (scanned)
4332                         break;
4333
4334                 type = !type;
4335                 tier = -1;
4336         }
4337
4338         *type_scanned = type;
4339
4340         return scanned;
4341 }
4342
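/*
 * Evict one batch: isolate pages under the LRU lock, reclaim them through
 * shrink_page_list() with the lock dropped, then put back whatever was
 * rejected, account the PGSTEAL events, and free the reclaimed pages.
 * Returns the number of pages scanned so the caller can decide whether to
 * keep going; *need_swapping is set when the batch came from the anon type.
 */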
4343 static int evict_pages(struct lruvec *lruvec, struct scan_control *sc, int swappiness,
4344                        bool *need_swapping)
4345 {
4346         int type;
4347         int scanned;
4348         int reclaimed;
4349         LIST_HEAD(list);
4350         struct page *page;
4351         enum vm_event_item item;
4352         struct reclaim_stat stat;
4353         struct lru_gen_mm_walk *walk;
4354         struct mem_cgroup *memcg = lruvec_memcg(lruvec);
4355         struct pglist_data *pgdat = lruvec_pgdat(lruvec);
4356
4357         spin_lock_irq(&lruvec_pgdat(lruvec)->lru_lock);
4358
4359         scanned = isolate_pages(lruvec, sc, swappiness, &type, &list);
4360
4361         scanned += try_to_inc_min_seq(lruvec, swappiness);
4362
4363         if (get_nr_gens(lruvec, !swappiness) == MIN_NR_GENS)
4364                 scanned = 0;
4365
4366         spin_unlock_irq(&lruvec_pgdat(lruvec)->lru_lock);
4367
4368         if (list_empty(&list))
4369                 return scanned;
4370
4371         reclaimed = shrink_page_list(&list, pgdat, sc, &stat, false);
4372
4373         list_for_each_entry(page, &list, lru) {
4374                 /* restore LRU_REFS_FLAGS cleared by isolate_page() */
4375                 if (PageWorkingset(page))
4376                         SetPageReferenced(page);
4377
4378                 /* don't add rejected pages to the oldest generation */
4379                 if (PageReclaim(page) &&
4380                     (PageDirty(page) || PageWriteback(page)))
4381                         ClearPageActive(page);
4382                 else
4383                         SetPageActive(page);
4384         }
4385
4386         spin_lock_irq(&lruvec_pgdat(lruvec)->lru_lock);
4387
4388         move_pages_to_lru(lruvec, &list);
4389
4390         walk = current->reclaim_state->mm_walk;
4391         if (walk && walk->batched)
4392                 reset_batch_size(lruvec, walk);
4393
4394         item = current_is_kswapd() ? PGSTEAL_KSWAPD : PGSTEAL_DIRECT;
4395         if (!cgroup_reclaim(sc))
4396                 __count_vm_events(item, reclaimed);
4397         __count_memcg_events(memcg, item, reclaimed);
4398
4399         spin_unlock_irq(&lruvec_pgdat(lruvec)->lru_lock);
4400
4401         mem_cgroup_uncharge_list(&list);
4402         free_unref_page_list(&list);
4403
4404         sc->nr_reclaimed += reclaimed;
4405
4406         if (need_swapping && type == LRU_GEN_ANON)
4407                 *need_swapping = true;
4408
4409         return scanned;
4410 }
4411
4412 /*
4413  * For future optimizations:
4414  * 1. Defer try_to_inc_max_seq() to workqueues to reduce latency for memcg
4415  *    reclaim.
4416  */
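/*
 * Return how many pages evict_pages() may scan from this lruvec. Returns 0
 * when the memcg is protected by memory.min or memory.low, when the aging
 * should be left to kswapd, or when the aging did not run and too few
 * generations remain; otherwise it returns the estimate produced by
 * should_run_aging(), possibly after advancing max_seq.
 */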
4417 static unsigned long get_nr_to_scan(struct lruvec *lruvec, struct scan_control *sc,
4418                                     bool can_swap, bool *need_aging)
4419 {
4420         unsigned long nr_to_scan;
4421         struct mem_cgroup *memcg = lruvec_memcg(lruvec);
4422         DEFINE_MAX_SEQ(lruvec);
4423         DEFINE_MIN_SEQ(lruvec);
4424
4425         if (mem_cgroup_below_min(memcg) ||
4426             (mem_cgroup_below_low(memcg) && !sc->memcg_low_reclaim))
4427                 return 0;
4428
4429         *need_aging = should_run_aging(lruvec, max_seq, min_seq, sc, can_swap, &nr_to_scan);
4430         if (!*need_aging)
4431                 return nr_to_scan;
4432
4433         /* skip the aging path at the default priority */
4434         if (sc->priority == DEF_PRIORITY)
4435                 goto done;
4436
4437         /* leave the work to lru_gen_age_node() */
4438         if (current_is_kswapd())
4439                 return 0;
4440
4441         if (try_to_inc_max_seq(lruvec, max_seq, sc, can_swap))
4442                 return nr_to_scan;
4443 done:
4444         return min_seq[!can_swap] + MIN_NR_GENS <= max_seq ? nr_to_scan : 0;
4445 }
4446
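/*
 * Decide whether lru_gen_shrink_lruvec() should stop early. Direct reclaim
 * stops if max_seq has advanced more than once since the scan began (each
 * memcg is aged at most once), if the target was met with swapping
 * involved, or if a fatal signal is pending; cgroup reclaim otherwise keeps
 * going. Kswapd keeps going while it is still short of sc->nr_to_reclaim.
 * Beyond that, once sc->priority has fallen to DEF_PRIORITY - 2 or below
 * and every eligible zone is above its watermark, MIN_LRU_BATCH is credited
 * and the scan stops.
 */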
4447 static bool should_abort_scan(struct lruvec *lruvec, unsigned long seq,
4448                               struct scan_control *sc, bool need_swapping)
4449 {
4450         int i;
4451         DEFINE_MAX_SEQ(lruvec);
4452
4453         if (!current_is_kswapd()) {
4454                 /* age each memcg at most once to ensure fairness */
4455                 if (max_seq - seq > 1)
4456                         return true;
4457
4458                 /* over-swapping can increase allocation latency */
4459                 if (sc->nr_reclaimed >= sc->nr_to_reclaim && need_swapping)
4460                         return true;
4461
4462                 /* give this thread a chance to exit and free its memory */
4463                 if (fatal_signal_pending(current)) {
4464                         sc->nr_reclaimed += MIN_LRU_BATCH;
4465                         return true;
4466                 }
4467
4468                 if (cgroup_reclaim(sc))
4469                         return false;
4470         } else if (sc->nr_reclaimed - sc->last_reclaimed < sc->nr_to_reclaim)
4471                 return false;
4472
4473         /* keep scanning at low priorities to ensure fairness */
4474         if (sc->priority > DEF_PRIORITY - 2)
4475                 return false;
4476
4477         /*
4478          * A minimum amount of work was done under global memory pressure. For
4479          * kswapd, it may be overshooting. For direct reclaim, the allocation
4480          * may succeed if all suitable zones are somewhat safe. In either case,
4481          * it's better to stop now, and restart later if necessary.
4482          */
4483         for (i = 0; i <= sc->reclaim_idx; i++) {
4484                 unsigned long wmark;
4485                 struct zone *zone = lruvec_pgdat(lruvec)->node_zones + i;
4486
4487                 if (!managed_zone(zone))
4488                         continue;
4489
4490                 wmark = current_is_kswapd() ? high_wmark_pages(zone) : low_wmark_pages(zone);
4491                 if (wmark > zone_page_state(zone, NR_FREE_PAGES))
4492                         return false;
4493         }
4494
4495         sc->nr_reclaimed += MIN_LRU_BATCH;
4496
4497         return true;
4498 }
4499
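/*
 * The main multi-gen LRU reclaim loop: pick a swappiness, ask
 * get_nr_to_scan() for a budget (aging the lruvec when needed), and call
 * evict_pages() until the budget is spent, no progress is made, or
 * should_abort_scan() says to stop. memcgs_need_aging is cleared only when
 * this lruvec reclaimed at least MIN_LRU_BATCH pages without needing to
 * age; see the comment in lru_gen_age_node().
 */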
4500 static void lru_gen_shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc)
4501 {
4502         struct blk_plug plug;
4503         bool need_aging = false;
4504         bool need_swapping = false;
4505         unsigned long scanned = 0;
4506         unsigned long reclaimed = sc->nr_reclaimed;
4507         DEFINE_MAX_SEQ(lruvec);
4508
4509         lru_add_drain();
4510
4511         blk_start_plug(&plug);
4512
4513         set_mm_walk(lruvec_pgdat(lruvec));
4514
4515         while (true) {
4516                 int delta;
4517                 int swappiness;
4518                 unsigned long nr_to_scan;
4519
4520                 if (sc->may_swap)
4521                         swappiness = get_swappiness(lruvec, sc);
4522                 else if (!cgroup_reclaim(sc) && get_swappiness(lruvec, sc))
4523                         swappiness = 1;
4524                 else
4525                         swappiness = 0;
4526
4527                 nr_to_scan = get_nr_to_scan(lruvec, sc, swappiness, &need_aging);
4528                 if (!nr_to_scan)
4529                         goto done;
4530
4531                 delta = evict_pages(lruvec, sc, swappiness, &need_swapping);
4532                 if (!delta)
4533                         goto done;
4534
4535                 scanned += delta;
4536                 if (scanned >= nr_to_scan)
4537                         break;
4538
4539                 if (should_abort_scan(lruvec, max_seq, sc, need_swapping))
4540                         break;
4541
4542                 cond_resched();
4543         }
4544
4545         /* see the comment in lru_gen_age_node() */
4546         if (sc->nr_reclaimed - reclaimed >= MIN_LRU_BATCH && !need_aging)
4547                 sc->memcgs_need_aging = false;
4548 done:
4549         clear_mm_walk();
4550
4551         blk_finish_plug(&plug);
4552 }
4553
4554 /******************************************************************************
4555  *                          initialization
4556  ******************************************************************************/
4557
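/*
 * Start each lruvec with max_seq = MIN_NR_GENS + 1, empty generation lists
 * for every (gen, type, zone), and the mm walk state seeded at MIN_NR_GENS.
 */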
4558 void lru_gen_init_lruvec(struct lruvec *lruvec)
4559 {
4560         int gen, type, zone;
4561         struct lru_gen_struct *lrugen = &lruvec->lrugen;
4562
4563         lrugen->max_seq = MIN_NR_GENS + 1;
4564
4565         for_each_gen_type_zone(gen, type, zone)
4566                 INIT_LIST_HEAD(&lrugen->lists[gen][type][zone]);
4567
4568         lruvec->mm_state.seq = MIN_NR_GENS;
4569         init_waitqueue_head(&lruvec->mm_state.wait);
4570 }
4571
4572 #ifdef CONFIG_MEMCG
4573 void lru_gen_init_memcg(struct mem_cgroup *memcg)
4574 {
4575         INIT_LIST_HEAD(&memcg->mm_list.fifo);
4576         spin_lock_init(&memcg->mm_list.lock);
4577 }
4578
4579 void lru_gen_exit_memcg(struct mem_cgroup *memcg)
4580 {
4581         int i;
4582         int nid;
4583
4584         for_each_node(nid) {
4585                 struct lruvec *lruvec = get_lruvec(memcg, nid);
4586
4587                 VM_WARN_ON_ONCE(memchr_inv(lruvec->lrugen.nr_pages, 0,
4588                                            sizeof(lruvec->lrugen.nr_pages)));
4589
4590                 for (i = 0; i < NR_BLOOM_FILTERS; i++) {
4591                         bitmap_free(lruvec->mm_state.filters[i]);
4592                         lruvec->mm_state.filters[i] = NULL;
4593                 }
4594         }
4595 }
4596 #endif
4597
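/*
 * Build-time sanity checks: MAX_NR_GENS must exceed MIN_NR_GENS + 1, and
 * the LRU_GEN_WIDTH-bit generation counter must be able to hold more than
 * MAX_NR_GENS distinct values.
 */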
4598 static int __init init_lru_gen(void)
4599 {
4600         BUILD_BUG_ON(MIN_NR_GENS + 1 >= MAX_NR_GENS);
4601         BUILD_BUG_ON(BIT(LRU_GEN_WIDTH) <= MAX_NR_GENS);
4602
4603         return 0;
4604 };
4605 late_initcall(init_lru_gen);
4606
4607 #else /* !CONFIG_LRU_GEN */
4608
4609 static void lru_gen_age_node(struct pglist_data *pgdat, struct scan_control *sc)
4610 {
4611 }
4612
4613 static void lru_gen_shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc)
4614 {
4615 }
4616
4617 #endif /* CONFIG_LRU_GEN */
4618
4619 static void shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc)
4620 {
4621         unsigned long nr[NR_LRU_LISTS];
4622         unsigned long targets[NR_LRU_LISTS];
4623         unsigned long nr_to_scan;
4624         enum lru_list lru;
4625         unsigned long nr_reclaimed = 0;
4626         unsigned long nr_to_reclaim = sc->nr_to_reclaim;
4627         struct blk_plug plug;
4628         bool scan_adjusted;
4629
4630         if (lru_gen_enabled()) {
4631                 lru_gen_shrink_lruvec(lruvec, sc);
4632                 return;
4633         }
4634
4635         get_scan_count(lruvec, sc, nr);
4636
4637         /* Record the original scan target for proportional adjustments later */
4638         memcpy(targets, nr, sizeof(nr));
4639
4640         /*
4641          * Global reclaiming within direct reclaim at DEF_PRIORITY is a normal
4642          * event that can occur when there is little memory pressure e.g.
4643          * multiple streaming readers/writers. Hence, we do not abort scanning
4644          * when the requested number of pages are reclaimed when scanning at
4645          * DEF_PRIORITY on the assumption that the fact we are direct
4646          * reclaiming implies that kswapd is not keeping up and it is best to
4647          * do a batch of work at once. For memcg reclaim one check is made to
4648          * abort proportional reclaim if either the file or anon lru has already
4649          * dropped to zero at the first pass.
4650          */
4651         scan_adjusted = (!cgroup_reclaim(sc) && !current_is_kswapd() &&
4652                          sc->priority == DEF_PRIORITY);
4653
4654         blk_start_plug(&plug);
4655         while (nr[LRU_INACTIVE_ANON] || nr[LRU_ACTIVE_FILE] ||
4656                                         nr[LRU_INACTIVE_FILE]) {
4657                 unsigned long nr_anon, nr_file, percentage;
4658                 unsigned long nr_scanned;
4659
4660                 for_each_evictable_lru(lru) {
4661                         if (nr[lru]) {
4662                                 nr_to_scan = min(nr[lru], SWAP_CLUSTER_MAX);
4663                                 nr[lru] -= nr_to_scan;
4664
4665                                 nr_reclaimed += shrink_list(lru, nr_to_scan,
4666                                                             lruvec, sc);
4667                         }
4668                 }
4669
4670                 cond_resched();
4671
4672                 if (nr_reclaimed < nr_to_reclaim || scan_adjusted)
4673                         continue;
4674
4675                 /*
4676                  * For kswapd and memcg, reclaim at least the number of pages
4677                  * requested. Ensure that the anon and file LRUs are scanned
4678                  * proportionally to what was requested by get_scan_count(). We
4679                  * stop reclaiming one LRU and reduce the amount of scanning
4680                  * proportional to the original scan target.
4681                  */
4682                 nr_file = nr[LRU_INACTIVE_FILE] + nr[LRU_ACTIVE_FILE];
4683                 nr_anon = nr[LRU_INACTIVE_ANON] + nr[LRU_ACTIVE_ANON];
4684
4685                 /*
4686                  * It's just vindictive to attack the larger once the smaller
4687                  * has gone to zero.  And given the way we stop scanning the
4688                  * smaller below, this makes sure that we only make one nudge
4689                  * towards proportionality once we've got nr_to_reclaim.
4690                  */
4691                 if (!nr_file || !nr_anon)
4692                         break;
4693
4694                 if (nr_file > nr_anon) {
4695                         unsigned long scan_target = targets[LRU_INACTIVE_ANON] +
4696                                                 targets[LRU_ACTIVE_ANON] + 1;
4697                         lru = LRU_BASE;
4698                         percentage = nr_anon * 100 / scan_target;
4699                 } else {
4700                         unsigned long scan_target = targets[LRU_INACTIVE_FILE] +
4701                                                 targets[LRU_ACTIVE_FILE] + 1;
4702                         lru = LRU_FILE;
4703                         percentage = nr_file * 100 / scan_target;
4704                 }
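                /*
                 * Worked example with illustrative numbers: if the original
                 * targets were anon = 1000 and file = 4000, and nr_anon = 600
                 * and nr_file = 3600 remain when nr_to_reclaim is met, then
                 * file is larger, so anon scanning stops and percentage =
                 * 600 * 100 / 1001 = 59. Each file target below is rescaled
                 * to (100 - 59)% of its original value, minus whatever has
                 * already been scanned from it.
                 */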
4705
4706                 /* Stop scanning the smaller of the LRU */
4707                 nr[lru] = 0;
4708                 nr[lru + LRU_ACTIVE] = 0;
4709
4710                 /*
4711                  * Recalculate the other LRU scan count based on its original
4712                  * scan target and the percentage scanning already complete
4713                  */
4714                 lru = (lru == LRU_FILE) ? LRU_BASE : LRU_FILE;
4715                 nr_scanned = targets[lru] - nr[lru];
4716                 nr[lru] = targets[lru] * (100 - percentage) / 100;
4717                 nr[lru] -= min(nr[lru], nr_scanned);
4718
4719                 lru += LRU_ACTIVE;
4720                 nr_scanned = targets[lru] - nr[lru];
4721                 nr[lru] = targets[lru] * (100 - percentage) / 100;
4722                 nr[lru] -= min(nr[lru], nr_scanned);
4723
4724                 scan_adjusted = true;
4725         }
4726         blk_finish_plug(&plug);
4727         sc->nr_reclaimed += nr_reclaimed;
4728
4729         /*
4730          * Even if we did not try to evict anon pages at all, we want to
4731          * rebalance the anon lru active/inactive ratio.
4732          */
4733         if (total_swap_pages && inactive_is_low(lruvec, LRU_INACTIVE_ANON))
4734                 shrink_active_list(SWAP_CLUSTER_MAX, lruvec,
4735                                    sc, LRU_ACTIVE_ANON);
4736 }
4737
4738 /* Use reclaim/compaction for costly allocs or under memory pressure */
4739 static bool in_reclaim_compaction(struct scan_control *sc)
4740 {
4741         if (IS_ENABLED(CONFIG_COMPACTION) && sc->order &&
4742                         (sc->order > PAGE_ALLOC_COSTLY_ORDER ||
4743                          sc->priority < DEF_PRIORITY - 2))
4744                 return true;
4745
4746         return false;
4747 }
4748
4749 /*
4750  * Reclaim/compaction is used for high-order allocation requests. It reclaims
4751  * order-0 pages before compacting the zone. should_continue_reclaim() returns
4752  * true if more pages should be reclaimed such that when the page allocator
4753  * calls try_to_compact_zone() that it will have enough free pages to succeed.
4754  * It will give up earlier than that if there is difficulty reclaiming pages.
4755  */
4756 static inline bool should_continue_reclaim(struct pglist_data *pgdat,
4757                                         unsigned long nr_reclaimed,
4758                                         struct scan_control *sc)
4759 {
4760         unsigned long pages_for_compaction;
4761         unsigned long inactive_lru_pages;
4762         int z;
4763
4764         /* If not in reclaim/compaction mode, stop */
4765         if (!in_reclaim_compaction(sc))
4766                 return false;
4767
4768         /*
4769          * Stop if we failed to reclaim any pages from the last SWAP_CLUSTER_MAX
4770          * number of pages that were scanned. This will return to the caller
4771           * with the risk that reclaim/compaction and the resulting allocation
4772           * attempt fail. In the past we have tried harder for __GFP_RETRY_MAYFAIL
4773          * allocations through requiring that the full LRU list has been scanned
4774          * first, by assuming that zero delta of sc->nr_scanned means full LRU
4775          * scan, but that approximation was wrong, and there were corner cases
4776           * where a non-zero number of pages was always scanned.
4777          */
4778         if (!nr_reclaimed)
4779                 return false;
4780
4781         /* If compaction would go ahead or the allocation would succeed, stop */
4782         for (z = 0; z <= sc->reclaim_idx; z++) {
4783                 struct zone *zone = &pgdat->node_zones[z];
4784                 if (!managed_zone(zone))
4785                         continue;
4786
4787                 switch (compaction_suitable(zone, sc->order, 0, sc->reclaim_idx)) {
4788                 case COMPACT_SUCCESS:
4789                 case COMPACT_CONTINUE:
4790                         return false;
4791                 default:
4792                         /* check next zone */
4793                         ;
4794                 }
4795         }
4796
4797         /*
4798          * If we have not reclaimed enough pages for compaction and the
4799          * inactive lists are large enough, continue reclaiming
4800          */
4801         pages_for_compaction = compact_gap(sc->order);
4802         inactive_lru_pages = node_page_state(pgdat, NR_INACTIVE_FILE);
4803         if (get_nr_swap_pages() > 0)
4804                 inactive_lru_pages += node_page_state(pgdat, NR_INACTIVE_ANON);
4805
4806         return inactive_lru_pages > pages_for_compaction;
4807 }
4808
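/*
 * Walk every memcg in the subtree rooted at sc->target_mem_cgroup and
 * shrink its lruvec and slab caches on this node, honoring memory.min
 * (always skipped) and memory.low (skipped unless memcg_low_reclaim is set,
 * in which case a MEMCG_LOW event is recorded).
 */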
4809 static void shrink_node_memcgs(pg_data_t *pgdat, struct scan_control *sc)
4810 {
4811         struct mem_cgroup *target_memcg = sc->target_mem_cgroup;
4812         struct mem_cgroup *memcg;
4813
4814         memcg = mem_cgroup_iter(target_memcg, NULL, NULL);
4815         do {
4816                 struct lruvec *lruvec = mem_cgroup_lruvec(memcg, pgdat);
4817                 unsigned long reclaimed;
4818                 unsigned long scanned;
4819
4820                 mem_cgroup_calculate_protection(target_memcg, memcg);
4821
4822                 if (mem_cgroup_below_min(memcg)) {
4823                         /*
4824                          * Hard protection.
4825                          * If there is no reclaimable memory, OOM.
4826                          */
4827                         continue;
4828                 } else if (mem_cgroup_below_low(memcg)) {
4829                         /*
4830                          * Soft protection.
4831                          * Respect the protection only as long as
4832                          * there is an unprotected supply
4833                          * of reclaimable memory from other cgroups.
4834                          */
4835                         if (!sc->memcg_low_reclaim) {
4836                                 sc->memcg_low_skipped = 1;
4837                                 continue;
4838                         }
4839                         memcg_memory_event(memcg, MEMCG_LOW);
4840                 }
4841
4842                 reclaimed = sc->nr_reclaimed;
4843                 scanned = sc->nr_scanned;
4844
4845                 shrink_lruvec(lruvec, sc);
4846
4847                 shrink_slab(sc->gfp_mask, pgdat->node_id, memcg,
4848                             sc->priority);
4849
4850                 /* Record the group's reclaim efficiency */
4851                 vmpressure(sc->gfp_mask, memcg, false,
4852                            sc->nr_scanned - scanned,
4853                            sc->nr_reclaimed - reclaimed);
4854
4855         } while ((memcg = mem_cgroup_iter(target_memcg, memcg, NULL)));
4856 }
4857
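/*
 * Reclaim from one node: shrink every memcg's lruvec and slab caches, feed
 * the result into vmpressure, set the writeback/dirty/congestion hints that
 * throttle kswapd and direct reclaimers, and loop while
 * should_continue_reclaim() thinks compaction still needs more free pages.
 */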
4858 static bool shrink_node(pg_data_t *pgdat, struct scan_control *sc)
4859 {
4860         struct reclaim_state *reclaim_state = current->reclaim_state;
4861         unsigned long nr_reclaimed, nr_scanned;
4862         struct lruvec *target_lruvec;
4863         bool reclaimable = false;
4864
4865         target_lruvec = mem_cgroup_lruvec(sc->target_mem_cgroup, pgdat);
4866
4867 again:
4868         memset(&sc->nr, 0, sizeof(sc->nr));
4869
4870         nr_reclaimed = sc->nr_reclaimed;
4871         nr_scanned = sc->nr_scanned;
4872
4873         prepare_scan_count(pgdat, sc);
4874
4875         shrink_node_memcgs(pgdat, sc);
4876
4877         if (reclaim_state) {
4878                 sc->nr_reclaimed += reclaim_state->reclaimed_slab;
4879                 reclaim_state->reclaimed_slab = 0;
4880         }
4881
4882         /* Record the subtree's reclaim efficiency */
4883         vmpressure(sc->gfp_mask, sc->target_mem_cgroup, true,
4884                    sc->nr_scanned - nr_scanned,
4885                    sc->nr_reclaimed - nr_reclaimed);
4886
4887         if (sc->nr_reclaimed - nr_reclaimed)
4888                 reclaimable = true;
4889
4890         if (current_is_kswapd()) {
4891                 /*
4892                  * If reclaim is isolating dirty pages under writeback,
4893                  * it implies that the long-lived page allocation rate
4894                  * is exceeding the page laundering rate. Either the
4895                  * global limits are not being effective at throttling
4896                  * processes due to the page distribution throughout
4897                  * zones or there is heavy usage of a slow backing
4898                  * device. The only option is to throttle from reclaim
4899                  * context which is not ideal as there is no guarantee
4900                  * the dirtying process is throttled in the same way
4901                  * balance_dirty_pages() manages.
4902                  *
4903                  * Once a node is flagged PGDAT_WRITEBACK, kswapd will
4904                  * count the number of pages under writeback flagged for
4905                  * immediate reclaim and stall if any are encountered
4906                  * in the nr_immediate check below.
4907                  */
4908                 if (sc->nr.writeback && sc->nr.writeback == sc->nr.taken)
4909                         set_bit(PGDAT_WRITEBACK, &pgdat->flags);
4910
4911                 /* Allow kswapd to start writing pages during reclaim. */
4912                 if (sc->nr.unqueued_dirty == sc->nr.file_taken)
4913                         set_bit(PGDAT_DIRTY, &pgdat->flags);
4914
4915                 /*
4916                  * If kswapd scans pages marked for immediate
4917                  * reclaim and under writeback (nr_immediate), it
4918                  * implies that pages are cycling through the LRU
4919                  * faster than they are written so also forcibly stall.
4920                  */
4921                 if (sc->nr.immediate)
4922                         congestion_wait(BLK_RW_ASYNC, HZ/10);
4923         }
4924
4925         /*
4926          * Tag a node/memcg as congested if all the dirty pages
4927          * scanned were backed by a congested BDI and
4928          * wait_iff_congested will stall.
4929          *
4930          * Legacy memcg will stall in page writeback so avoid forcibly
4931          * stalling in wait_iff_congested().
4932          */
4933         if ((current_is_kswapd() ||
4934              (cgroup_reclaim(sc) && writeback_throttling_sane(sc))) &&
4935             sc->nr.dirty && sc->nr.dirty == sc->nr.congested)
4936                 set_bit(LRUVEC_CONGESTED, &target_lruvec->flags);
4937
4938         /*
4939          * Stall direct reclaim for IO completions if underlying BDIs
4940          * and node is congested. Allow kswapd to continue until it
4941          * starts encountering unqueued dirty pages or cycling through
4942          * the LRU too quickly.
4943          */
4944         if (!current_is_kswapd() && current_may_throttle() &&
4945             !sc->hibernation_mode &&
4946             test_bit(LRUVEC_CONGESTED, &target_lruvec->flags))
4947                 wait_iff_congested(BLK_RW_ASYNC, HZ/10);
4948
4949         if (should_continue_reclaim(pgdat, sc->nr_reclaimed - nr_reclaimed,
4950                                     sc))
4951                 goto again;
4952
4953         /*
4954          * Kswapd gives up on balancing particular nodes after too
4955          * many failures to reclaim anything from them and goes to
4956          * sleep. On reclaim progress, reset the failure counter. A
4957          * successful direct reclaim run will revive a dormant kswapd.
4958          */
4959         if (reclaimable)
4960                 pgdat->kswapd_failures = 0;
4961
4962         return reclaimable;
4963 }
4964
4965 /*
4966  * Returns true if compaction should go ahead for a costly-order request, or
4967  * the allocation would already succeed without compaction. Return false if we
4968  * should reclaim first.
4969  */
4970 static inline bool compaction_ready(struct zone *zone, struct scan_control *sc)
4971 {
4972         unsigned long watermark;
4973         enum compact_result suitable;
4974
4975         suitable = compaction_suitable(zone, sc->order, 0, sc->reclaim_idx);
4976         if (suitable == COMPACT_SUCCESS)
4977                 /* Allocation should succeed already. Don't reclaim. */
4978                 return true;
4979         if (suitable == COMPACT_SKIPPED)
4980                 /* Compaction cannot yet proceed. Do reclaim. */
4981                 return false;
4982
4983         /*
4984          * Compaction is already possible, but it takes time to run and there
4985          * are potentially other callers using the pages just freed. So proceed
4986          * with reclaim to make a buffer of free pages available to give
4987          * compaction a reasonable chance of completing and allocating the page.
4988          * Note that we won't actually reclaim the whole buffer in one attempt
4989          * as the target watermark in should_continue_reclaim() is lower. But if
4990          * we are already above the high+gap watermark, don't reclaim at all.
4991          */
4992         watermark = high_wmark_pages(zone) + compact_gap(sc->order);
4993
4994         return zone_watermark_ok_safe(zone, 0, watermark, sc->reclaim_idx);
4995 }
4996
4997 /*
4998  * This is the direct reclaim path, for page-allocating processes.  We only
4999  * try to reclaim pages from zones which will satisfy the caller's allocation
5000  * request.
5001  *
5002  * If a zone is deemed to be full of pinned pages then just give it a light
5003  * scan then give up on it.
5004  */
5005 static void shrink_zones(struct zonelist *zonelist, struct scan_control *sc)
5006 {
5007         struct zoneref *z;
5008         struct zone *zone;
5009         unsigned long nr_soft_reclaimed;
5010         unsigned long nr_soft_scanned;
5011         gfp_t orig_mask;
5012         pg_data_t *last_pgdat = NULL;
5013
5014         /*
5015          * If the number of buffer_heads in the machine exceeds the maximum
5016          * allowed level, force direct reclaim to scan the highmem zone as
5017          * highmem pages could be pinning lowmem pages storing buffer_heads
5018          */
5019         orig_mask = sc->gfp_mask;
5020         if (buffer_heads_over_limit) {
5021                 sc->gfp_mask |= __GFP_HIGHMEM;
5022                 sc->reclaim_idx = gfp_zone(sc->gfp_mask);
5023         }
5024
5025         for_each_zone_zonelist_nodemask(zone, z, zonelist,
5026                                         sc->reclaim_idx, sc->nodemask) {
5027                 /*
5028                  * Take care that memory controller reclaim has only a small
5029                  * influence on the global LRU.
5030                  */
5031                 if (!cgroup_reclaim(sc)) {
5032                         if (!cpuset_zone_allowed(zone,
5033                                                  GFP_KERNEL | __GFP_HARDWALL))
5034                                 continue;
5035
5036                         /*
5037                          * If we already have plenty of memory free for
5038                          * compaction in this zone, don't free any more.
5039                          * Even though compaction is invoked for any
5040                          * non-zero order, only frequent costly order
5041                          * reclamation is disruptive enough to become a
5042                          * noticeable problem, like transparent huge
5043                          * page allocations.
5044                          */
5045                         if (IS_ENABLED(CONFIG_COMPACTION) &&
5046                             sc->order > PAGE_ALLOC_COSTLY_ORDER &&
5047                             compaction_ready(zone, sc)) {
5048                                 sc->compaction_ready = true;
5049                                 continue;
5050                         }
5051
5052                         /*
5053                          * Shrink each node in the zonelist once. If the
5054                          * zonelist is ordered by zone (not the default) then a
5055                          * node may be shrunk multiple times but in that case
5056                          * the user prefers lower zones being preserved.
5057                          */
5058                         if (zone->zone_pgdat == last_pgdat)
5059                                 continue;
5060
5061                         /*
5062                          * This steals pages from memory cgroups over softlimit
5063                          * and returns the number of reclaimed pages and
5064                          * scanned pages. This works for global memory pressure
5065                          * and balancing, not for a memcg's limit.
5066                          */
5067                         nr_soft_scanned = 0;
5068                         nr_soft_reclaimed = mem_cgroup_soft_limit_reclaim(zone->zone_pgdat,
5069                                                 sc->order, sc->gfp_mask,
5070                                                 &nr_soft_scanned);
5071                         sc->nr_reclaimed += nr_soft_reclaimed;
5072                         sc->nr_scanned += nr_soft_scanned;
5073                         /* need some check to avoid more shrink_node() calls */
5074                 }
5075
5076                 /* See comment about same check for global reclaim above */
5077                 if (zone->zone_pgdat == last_pgdat)
5078                         continue;
5079                 last_pgdat = zone->zone_pgdat;
5080                 shrink_node(zone->zone_pgdat, sc);
5081         }
5082
5083         /*
5084          * Restore to original mask to avoid the impact on the caller if we
5085          * promoted it to __GFP_HIGHMEM.
5086          */
5087         sc->gfp_mask = orig_mask;
5088 }
5089
5090 static void snapshot_refaults(struct mem_cgroup *target_memcg, pg_data_t *pgdat)
5091 {
5092         struct lruvec *target_lruvec;
5093         unsigned long refaults;
5094
5095         if (lru_gen_enabled())
5096                 return;
5097
5098         target_lruvec = mem_cgroup_lruvec(target_memcg, pgdat);
5099         refaults = lruvec_page_state(target_lruvec, WORKINGSET_ACTIVATE);
5100         target_lruvec->refaults = refaults;
5101 }
5102
5103 /*
5104  * This is the main entry point to direct page reclaim.
5105  *
5106  * If a full scan of the inactive list fails to free enough memory then we
5107  * are "out of memory" and something needs to be killed.
5108  *
5109  * If the caller is !__GFP_FS then the probability of a failure is reasonably
5110  * high - the zone may be full of dirty or under-writeback pages, which this
5111  * caller can't do much about.  We kick the writeback threads and take explicit
5112  * naps in the hope that some of these pages can be written.  But if the
5113  * allocating task holds filesystem locks which prevent writeout this might not
5114  * work, and the allocation attempt will fail.
5115  *
5116  * returns:     0, if no pages reclaimed
5117  *              else, the number of pages reclaimed
5118  */
5119 static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
5120                                           struct scan_control *sc)
5121 {
5122         int initial_priority = sc->priority;
5123         pg_data_t *last_pgdat;
5124         struct zoneref *z;
5125         struct zone *zone;
5126 retry:
5127         delayacct_freepages_start();
5128
5129         if (!cgroup_reclaim(sc))
5130                 __count_zid_vm_events(ALLOCSTALL, sc->reclaim_idx, 1);
5131
5132         do {
5133                 vmpressure_prio(sc->gfp_mask, sc->target_mem_cgroup,
5134                                 sc->priority);
5135                 sc->nr_scanned = 0;
5136                 shrink_zones(zonelist, sc);
5137
5138                 if (sc->nr_reclaimed >= sc->nr_to_reclaim)
5139                         break;
5140
5141                 if (sc->compaction_ready)
5142                         break;
5143
5144                 /*
5145                  * If we're having trouble reclaiming, start doing
5146                  * writepage even in laptop mode.
5147                  */
5148                 if (sc->priority < DEF_PRIORITY - 2)
5149                         sc->may_writepage = 1;
5150         } while (--sc->priority >= 0);
5151
5152         last_pgdat = NULL;
5153         for_each_zone_zonelist_nodemask(zone, z, zonelist, sc->reclaim_idx,
5154                                         sc->nodemask) {
5155                 if (zone->zone_pgdat == last_pgdat)
5156                         continue;
5157                 last_pgdat = zone->zone_pgdat;
5158
5159                 snapshot_refaults(sc->target_mem_cgroup, zone->zone_pgdat);
5160
5161                 if (cgroup_reclaim(sc)) {
5162                         struct lruvec *lruvec;
5163
5164                         lruvec = mem_cgroup_lruvec(sc->target_mem_cgroup,
5165                                                    zone->zone_pgdat);
5166                         clear_bit(LRUVEC_CONGESTED, &lruvec->flags);
5167                 }
5168         }
5169
5170         delayacct_freepages_end();
5171
5172         if (sc->nr_reclaimed)
5173                 return sc->nr_reclaimed;
5174
5175         /* Aborted reclaim to try compaction? don't OOM, then */
5176         if (sc->compaction_ready)
5177                 return 1;
5178
5179         /*
5180          * We make inactive:active ratio decisions based on the node's
5181          * composition of memory, but a restrictive reclaim_idx or a
5182          * memory.low cgroup setting can exempt large amounts of
5183          * memory from reclaim. Neither of these is very common, so
5184          * instead of doing costly eligibility calculations of the
5185          * entire cgroup subtree up front, we assume the estimates are
5186          * good, and retry with forcible deactivation if that fails.
5187          */
5188         if (sc->skipped_deactivate) {
5189                 sc->priority = initial_priority;
5190                 sc->force_deactivate = 1;
5191                 sc->skipped_deactivate = 0;
5192                 goto retry;
5193         }
5194
5195         /* Untapped cgroup reserves?  Don't OOM, retry. */
5196         if (sc->memcg_low_skipped) {
5197                 sc->priority = initial_priority;
5198                 sc->force_deactivate = 0;
5199                 sc->skipped_deactivate = 0;
5200                 sc->memcg_low_reclaim = 1;
5201                 sc->memcg_low_skipped = 0;
5202                 goto retry;
5203         }
5204
5205         return 0;
5206 }
5207
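/*
 * Direct reclaim may proceed without being throttled on pfmemalloc_wait as
 * long as the free pages in the ZONE_NORMAL-and-below zones exceed half of
 * their combined min watermarks, or kswapd has already given up on this
 * node. When the check fails and kswapd is asleep, it is woken so progress
 * can be made.
 */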
5208 static bool allow_direct_reclaim(pg_data_t *pgdat)
5209 {
5210         struct zone *zone;
5211         unsigned long pfmemalloc_reserve = 0;
5212         unsigned long free_pages = 0;
5213         int i;
5214         bool wmark_ok;
5215
5216         if (pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES)
5217                 return true;
5218
5219         for (i = 0; i <= ZONE_NORMAL; i++) {
5220                 zone = &pgdat->node_zones[i];
5221                 if (!managed_zone(zone))
5222                         continue;
5223
5224                 if (!zone_reclaimable_pages(zone))
5225                         continue;
5226
5227                 pfmemalloc_reserve += min_wmark_pages(zone);
5228                 free_pages += zone_page_state(zone, NR_FREE_PAGES);
5229         }
5230
5231         /* If there are no reserves (unexpected config) then do not throttle */
5232         if (!pfmemalloc_reserve)
5233                 return true;
5234
5235         wmark_ok = free_pages > pfmemalloc_reserve / 2;
5236
5237         /* kswapd must be awake if processes are being throttled */
5238         if (!wmark_ok && waitqueue_active(&pgdat->kswapd_wait)) {
5239                 pgdat->kswapd_classzone_idx = min(pgdat->kswapd_classzone_idx,
5240                                                 (enum zone_type)ZONE_NORMAL);
5241                 wake_up_interruptible(&pgdat->kswapd_wait);
5242         }
5243
5244         return wmark_ok;
5245 }
5246
5247 /*
5248  * Throttle direct reclaimers if backing storage is backed by the network
5249  * and the PFMEMALLOC reserve for the preferred node is getting dangerously
5250  * depleted. kswapd will continue to make progress and wake the processes
5251  * when the low watermark is reached.
5252  *
5253  * Returns true if a fatal signal was delivered during throttling. If this
5254  * happens, the page allocator should not consider triggering the OOM killer.
5255  */
5256 static bool throttle_direct_reclaim(gfp_t gfp_mask, struct zonelist *zonelist,
5257                                         nodemask_t *nodemask)
5258 {
5259         struct zoneref *z;
5260         struct zone *zone;
5261         pg_data_t *pgdat = NULL;
5262
5263         /*
5264          * Kernel threads should not be throttled as they may be indirectly
5265          * responsible for cleaning pages necessary for reclaim to make forward
5266          * progress. kjournald for example may enter direct reclaim while
5267          * committing a transaction where throttling it could force other
5268          * processes to block on log_wait_commit().
5269          */
5270         if (current->flags & PF_KTHREAD)
5271                 goto out;
5272
5273         /*
5274          * If a fatal signal is pending, this process should not throttle.
5275          * It should return quickly so it can exit and free its memory
5276          */
5277         if (fatal_signal_pending(current))
5278                 goto out;
5279
5280         /*
5281          * Check if the pfmemalloc reserves are ok by finding the first node
5282          * with a usable ZONE_NORMAL or lower zone. The expectation is that
5283          * GFP_KERNEL will be required for allocating network buffers when
5284          * swapping over the network so ZONE_HIGHMEM is unusable.
5285          *
5286          * Throttling is based on the first usable node and throttled processes
5287          * wait on a queue until kswapd makes progress and wakes them. There
5288          * is an affinity then between processes waking up and where reclaim
5289          * progress has been made assuming the process wakes on the same node.
5290          * More importantly, processes running on remote nodes will not compete
5291          * for remote pfmemalloc reserves and processes on different nodes
5292          * should make reasonable progress.
5293          */
5294         for_each_zone_zonelist_nodemask(zone, z, zonelist,
5295                                         gfp_zone(gfp_mask), nodemask) {
5296                 if (zone_idx(zone) > ZONE_NORMAL)
5297                         continue;
5298
5299                 /* Throttle based on the first usable node */
5300                 pgdat = zone->zone_pgdat;
5301                 if (allow_direct_reclaim(pgdat))
5302                         goto out;
5303                 break;
5304         }
5305
5306         /* If no zone was usable by the allocation flags then do not throttle */
5307         if (!pgdat)
5308                 goto out;
5309
5310         /* Account for the throttling */
5311         count_vm_event(PGSCAN_DIRECT_THROTTLE);
5312
5313         /*
5314          * If the caller cannot enter the filesystem, it's possible that it
5315          * is due to the caller holding an FS lock or performing a journal
5316          * transaction in the case of a filesystem like ext[3|4]. In this case,
5317          * it is not safe to block on pfmemalloc_wait as kswapd could be
5318          * blocked waiting on the same lock. Instead, throttle for up to a
5319          * second before continuing.
5320          */
5321         if (!(gfp_mask & __GFP_FS)) {
5322                 wait_event_interruptible_timeout(pgdat->pfmemalloc_wait,
5323                         allow_direct_reclaim(pgdat), HZ);
5324
5325                 goto check_pending;
5326         }
5327
5328         /* Throttle until kswapd wakes the process */
5329         wait_event_killable(zone->zone_pgdat->pfmemalloc_wait,
5330                 allow_direct_reclaim(pgdat));
5331
5332 check_pending:
5333         if (fatal_signal_pending(current))
5334                 return true;
5335
5336 out:
5337         return false;
5338 }
5339
5340 unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
5341                                 gfp_t gfp_mask, nodemask_t *nodemask)
5342 {
5343         unsigned long nr_reclaimed;
5344         struct scan_control sc = {
5345                 .nr_to_reclaim = SWAP_CLUSTER_MAX,
5346                 .gfp_mask = current_gfp_context(gfp_mask),
5347                 .reclaim_idx = gfp_zone(gfp_mask),
5348                 .order = order,
5349                 .nodemask = nodemask,
5350                 .priority = DEF_PRIORITY,
5351                 .may_writepage = !laptop_mode,
5352                 .may_unmap = 1,
5353                 .may_swap = 1,
5354         };
5355
5356         /*
5357          * scan_control uses s8 fields for order, priority, and reclaim_idx.
5358          * Confirm they are large enough for max values.
5359          */
5360         BUILD_BUG_ON(MAX_ORDER > S8_MAX);
5361         BUILD_BUG_ON(DEF_PRIORITY > S8_MAX);
5362         BUILD_BUG_ON(MAX_NR_ZONES > S8_MAX);
5363
5364         /*
5365          * Do not enter reclaim if fatal signal was delivered while throttled.
5366          * 1 is returned so that the page allocator does not OOM kill at this
5367          * point.
5368          */
5369         if (throttle_direct_reclaim(sc.gfp_mask, zonelist, nodemask))
5370                 return 1;
5371
5372         set_task_reclaim_state(current, &sc.reclaim_state);
5373         trace_mm_vmscan_direct_reclaim_begin(order, sc.gfp_mask);
5374
5375         nr_reclaimed = do_try_to_free_pages(zonelist, &sc);
5376
5377         trace_mm_vmscan_direct_reclaim_end(nr_reclaimed);
5378         set_task_reclaim_state(current, NULL);
5379
5380         return nr_reclaimed;
5381 }
5382
5383 #ifdef CONFIG_MEMCG
5384
5385 /* Only used by soft limit reclaim. Do not reuse for anything else. */
5386 unsigned long mem_cgroup_shrink_node(struct mem_cgroup *memcg,
5387                                                 gfp_t gfp_mask, bool noswap,
5388                                                 pg_data_t *pgdat,
5389                                                 unsigned long *nr_scanned)
5390 {
5391         struct lruvec *lruvec = mem_cgroup_lruvec(memcg, pgdat);
5392         struct scan_control sc = {
5393                 .nr_to_reclaim = SWAP_CLUSTER_MAX,
5394                 .target_mem_cgroup = memcg,
5395                 .may_writepage = !laptop_mode,
5396                 .may_unmap = 1,
5397                 .reclaim_idx = MAX_NR_ZONES - 1,
5398                 .may_swap = !noswap,
5399         };
5400
5401         WARN_ON_ONCE(!current->reclaim_state);
5402
5403         sc.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) |
5404                         (GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK);
5405
5406         trace_mm_vmscan_memcg_softlimit_reclaim_begin(sc.order,
5407                                                       sc.gfp_mask);
5408
5409         /*
5410          * NOTE: Although we can get the priority field, using it
5411          * here is not a good idea, since it limits the pages we can scan.
5412           * If we don't reclaim here, the shrink_node from balance_pgdat
5413           * will pick up pages from other mem cgroups as well. We hack
5414          * the priority and make it zero.
5415          */
5416         shrink_lruvec(lruvec, &sc);
5417
5418         trace_mm_vmscan_memcg_softlimit_reclaim_end(sc.nr_reclaimed);
5419
5420         *nr_scanned = sc.nr_scanned;
5421
5422         return sc.nr_reclaimed;
5423 }
5424
5425 unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg,
5426                                            unsigned long nr_pages,
5427                                            gfp_t gfp_mask,
5428                                            bool may_swap)
5429 {
5430         struct zonelist *zonelist;
5431         unsigned long nr_reclaimed;
5432         unsigned long pflags;
5433         int nid;
5434         unsigned int noreclaim_flag;
5435         struct scan_control sc = {
5436                 .nr_to_reclaim = max(nr_pages, SWAP_CLUSTER_MAX),
5437                 .gfp_mask = (current_gfp_context(gfp_mask) & GFP_RECLAIM_MASK) |
5438                                 (GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK),
5439                 .reclaim_idx = MAX_NR_ZONES - 1,
5440                 .target_mem_cgroup = memcg,
5441                 .priority = DEF_PRIORITY,
5442                 .may_writepage = !laptop_mode,
5443                 .may_unmap = 1,
5444                 .may_swap = may_swap,
5445         };
5446
5447         set_task_reclaim_state(current, &sc.reclaim_state);
5448         /*
5449          * Unlike direct reclaim via alloc_pages(), memcg's reclaim doesn't
5450          * control which node the pages come from. So the node where we start the
5451          * scan does not need to be the current node.
5452          */
5453         nid = mem_cgroup_select_victim_node(memcg);
5454
5455         zonelist = &NODE_DATA(nid)->node_zonelists[ZONELIST_FALLBACK];
5456
5457         trace_mm_vmscan_memcg_reclaim_begin(0, sc.gfp_mask);
5458
5459         psi_memstall_enter(&pflags);
5460         noreclaim_flag = memalloc_noreclaim_save();
5461
5462         nr_reclaimed = do_try_to_free_pages(zonelist, &sc);
5463
5464         memalloc_noreclaim_restore(noreclaim_flag);
5465         psi_memstall_leave(&pflags);
5466
5467         trace_mm_vmscan_memcg_reclaim_end(nr_reclaimed);
5468         set_task_reclaim_state(current, NULL);
5469
5470         return nr_reclaimed;
5471 }
5472 #endif
5473
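/*
 * Background aging from kswapd. With the multi-gen LRU enabled this is
 * delegated to lru_gen_age_node(); otherwise, when swap is configured and
 * the node-wide inactive anon list is low, kswapd pre-emptively shrinks the
 * active anon list of every memcg on this node.
 */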
5474 static void kswapd_age_node(struct pglist_data *pgdat, struct scan_control *sc)
5475 {
5476         struct mem_cgroup *memcg;
5477         struct lruvec *lruvec;
5478
5479         if (lru_gen_enabled()) {
5480                 lru_gen_age_node(pgdat, sc);
5481                 return;
5482         }
5483
5484         /* FIXME? */
5485         if (!total_swap_pages)
5486                 return;
5487
5488         lruvec = mem_cgroup_lruvec(NULL, pgdat);
5489         if (!inactive_is_low(lruvec, LRU_INACTIVE_ANON))
5490                 return;
5491
5492         memcg = mem_cgroup_iter(NULL, NULL, NULL);
5493         do {
5494                 lruvec = mem_cgroup_lruvec(memcg, pgdat);
5495                 shrink_active_list(SWAP_CLUSTER_MAX, lruvec,
5496                                    sc, LRU_ACTIVE_ANON);
5497                 memcg = mem_cgroup_iter(NULL, memcg, NULL);
5498         } while (memcg);
5499 }
5500
5501 static bool pgdat_watermark_boosted(pg_data_t *pgdat, int classzone_idx)
5502 {
5503         int i;
5504         struct zone *zone;
5505
5506         /*
5507          * Check for watermark boosts top-down as the higher zones
5508          * are more likely to be boosted. Both watermarks and boosts
5509          * should not be checked at the same time as reclaim would
5510          * start prematurely when there is no boosting and a lower
5511          * zone is balanced.
5512          */
5513         for (i = classzone_idx; i >= 0; i--) {
5514                 zone = pgdat->node_zones + i;
5515                 if (!managed_zone(zone))
5516                         continue;
5517
5518                 if (zone->watermark_boost)
5519                         return true;
5520         }
5521
5522         return false;
5523 }
5524
5525 /*
5526  * Returns true if there is an eligible zone balanced for the requested order
5527  * and classzone_idx
5528  */
5529 static bool pgdat_balanced(pg_data_t *pgdat, int order, int classzone_idx)
5530 {
5531         int i;
5532         unsigned long mark = -1;
5533         struct zone *zone;
5534
5535         /*
5536          * Check watermarks bottom-up as lower zones are more likely to
5537          * meet watermarks.
5538          */
5539         for (i = 0; i <= classzone_idx; i++) {
5540                 zone = pgdat->node_zones + i;
5541
5542                 if (!managed_zone(zone))
5543                         continue;
5544
5545                 mark = high_wmark_pages(zone);
5546                 if (zone_watermark_ok_safe(zone, order, mark, classzone_idx))
5547                         return true;
5548         }
5549
5550         /*
5551          * If a node has no populated zone within classzone_idx, it does not
5552          * need balancing by definition. This can happen if a zone-restricted
5553          * allocation tries to wake a remote kswapd.
5554          */
5555         if (mark == -1)
5556                 return true;
5557
5558         return false;
5559 }
5560
5561 /* Clear pgdat state for congested, dirty or under writeback. */
5562 static void clear_pgdat_congested(pg_data_t *pgdat)
5563 {
5564         struct lruvec *lruvec = mem_cgroup_lruvec(NULL, pgdat);
5565
5566         clear_bit(LRUVEC_CONGESTED, &lruvec->flags);
5567         clear_bit(PGDAT_DIRTY, &pgdat->flags);
5568         clear_bit(PGDAT_WRITEBACK, &pgdat->flags);
5569 }
5570
5571 /*
5572  * Prepare kswapd for sleeping. This verifies that there are no processes
5573  * waiting in throttle_direct_reclaim() and that watermarks have been met.
5574  *
5575  * Returns true if kswapd is ready to sleep
5576  */
5577 static bool prepare_kswapd_sleep(pg_data_t *pgdat, int order, int classzone_idx)
5578 {
5579         /*
5580          * The throttled processes are normally woken up in balance_pgdat() as
5581          * soon as allow_direct_reclaim() is true. But there is a potential
5582          * race between when kswapd checks the watermarks and a process gets
5583          * throttled. There is also a potential race if processes get
5584          * throttled, kswapd wakes, and a large process exits, thereby balancing the
5585          * zones, which causes kswapd to exit balance_pgdat() before reaching
5586          * the wake up checks. If kswapd is going to sleep, no process should
5587          * be sleeping on pfmemalloc_wait, so wake them now if necessary. If
5588          * the wake up is premature, processes will wake kswapd and get
5589          * throttled again. The difference from wake ups in balance_pgdat() is
5590          * that here we are under prepare_to_wait().
5591          */
5592         if (waitqueue_active(&pgdat->pfmemalloc_wait))
5593                 wake_up_all(&pgdat->pfmemalloc_wait);
5594
5595         /* Hopeless node, leave it to direct reclaim */
5596         if (pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES)
5597                 return true;
5598
5599         if (pgdat_balanced(pgdat, order, classzone_idx)) {
5600                 clear_pgdat_congested(pgdat);
5601                 return true;
5602         }
5603
5604         return false;
5605 }
5606
5607 /*
5608  * kswapd shrinks the pages of a node that are at or below the highest usable
5609  * zone that is currently unbalanced.
5610  *
5611  * Returns true if kswapd scanned at least the requested number of pages to
5612  * reclaim or if the lack of progress was due to pages under writeback.
5613  * This is used to determine if the scanning priority needs to be raised.
5614  */
5615 static bool kswapd_shrink_node(pg_data_t *pgdat,
5616                                struct scan_control *sc)
5617 {
5618         struct zone *zone;
5619         int z;
5620
5621         /* Reclaim a number of pages proportional to the number of zones */
5622         sc->nr_to_reclaim = 0;
5623         for (z = 0; z <= sc->reclaim_idx; z++) {
5624                 zone = pgdat->node_zones + z;
5625                 if (!managed_zone(zone))
5626                         continue;
5627
5628                 sc->nr_to_reclaim += max(high_wmark_pages(zone), SWAP_CLUSTER_MAX);
5629         }
5630
5631         /*
5632          * Historically care was taken to put equal pressure on all zones but
5633          * now pressure is applied based on node LRU order.
5634          */
5635         shrink_node(pgdat, sc);
5636
5637         /*
5638          * Fragmentation may mean that the system cannot be rebalanced for
5639          * high-order allocations. If twice the allocation size has been
5640          * reclaimed then recheck watermarks only at order-0 to prevent
5641          * excessive reclaim. Assume that a process that requested a high-order
5642          * allocation can itself direct reclaim/compact.
5643          */
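        /*
         * Illustrative arithmetic (sketch): compact_gap(order) is 2UL << order,
         * so for an order-3 request kswapd drops back to order-0 watermark
         * checks once 16 pages have been reclaimed in this pass.
         */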
5644         if (sc->order && sc->nr_reclaimed >= compact_gap(sc->order))
5645                 sc->order = 0;
5646
5647         return sc->nr_scanned >= sc->nr_to_reclaim;
5648 }
5649
5650 /*
5651  * For kswapd, balance_pgdat() will reclaim pages across a node from zones
5652  * that are eligible for use by the caller until at least one zone is
5653  * balanced.
5654  *
5655  * Returns the order kswapd finished reclaiming at.
5656  *
5657  * kswapd scans the zones in the highmem->normal->dma direction.  It skips
5658  * zones which have free_pages > high_wmark_pages(zone), but once a zone is
5659  * found to have free_pages <= high_wmark_pages(zone), any page in that zone
5660  * or lower is eligible for reclaim until at least one usable zone is
5661  * balanced.
5662  */
5663 static int balance_pgdat(pg_data_t *pgdat, int order, int classzone_idx)
5664 {
5665         int i;
5666         unsigned long nr_soft_reclaimed;
5667         unsigned long nr_soft_scanned;
5668         unsigned long pflags;
5669         unsigned long nr_boost_reclaim;
5670         unsigned long zone_boosts[MAX_NR_ZONES] = { 0, };
5671         bool boosted;
5672         struct zone *zone;
5673         struct scan_control sc = {
5674                 .gfp_mask = GFP_KERNEL,
5675                 .order = order,
5676                 .may_unmap = 1,
5677         };
5678
5679         set_task_reclaim_state(current, &sc.reclaim_state);
5680         psi_memstall_enter(&pflags);
5681         __fs_reclaim_acquire();
5682
5683         count_vm_event(PAGEOUTRUN);
5684
5685         /*
5686          * Account for the reclaim boost. Note that the zone boost is left in
5687          * place so that parallel allocations that are near the watermark will
5688          * stall or enter direct reclaim until kswapd is finished.
5689          */
5690         nr_boost_reclaim = 0;
5691         for (i = 0; i <= classzone_idx; i++) {
5692                 zone = pgdat->node_zones + i;
5693                 if (!managed_zone(zone))
5694                         continue;
5695
5696                 nr_boost_reclaim += zone->watermark_boost;
5697                 zone_boosts[i] = zone->watermark_boost;
5698         }
5699         boosted = nr_boost_reclaim;
5700
5701 restart:
5702         sc.priority = DEF_PRIORITY;
5703         do {
5704                 unsigned long nr_reclaimed = sc.nr_reclaimed;
5705                 bool raise_priority = true;
5706                 bool balanced;
5707                 bool ret;
5708
5709                 sc.reclaim_idx = classzone_idx;
5710
5711                 /*
5712                  * If the number of buffer_heads exceeds the maximum allowed
5713                  * then consider reclaiming from all zones. This has a dual
5714                  * purpose -- on 64-bit systems it is expected that
5715                  * buffer_heads are stripped during active rotation. On 32-bit
5716                  * systems, highmem pages can pin lowmem memory and shrinking
5717                  * buffers can relieve lowmem pressure. Reclaim may still not
5718                  * go ahead if all eligible zones for the original allocation
5719                  * request are balanced to avoid excessive reclaim from kswapd.
5720                  */
5721                 if (buffer_heads_over_limit) {
5722                         for (i = MAX_NR_ZONES - 1; i >= 0; i--) {
5723                                 zone = pgdat->node_zones + i;
5724                                 if (!managed_zone(zone))
5725                                         continue;
5726
5727                                 sc.reclaim_idx = i;
5728                                 break;
5729                         }
5730                 }
5731
5732                 /*
5733                  * If the pgdat is imbalanced then ignore boosting and preserve
5734                  * the watermarks for a later time and restart. Note that the
5735                  * zone watermarks will still be reset at the end of balancing
5736                  * on the grounds that the normal reclaim should be enough to
5737                  * re-evaluate if boosting is required when kswapd next wakes.
5738                  */
5739                 balanced = pgdat_balanced(pgdat, sc.order, classzone_idx);
5740                 if (!balanced && nr_boost_reclaim) {
5741                         nr_boost_reclaim = 0;
5742                         goto restart;
5743                 }
5744
5745                 /*
5746                  * If boosting is not active then only reclaim if there are no
5747                  * eligible zones. Note that sc.reclaim_idx is not used as
5748                  * buffer_heads_over_limit may have adjusted it.
5749                  */
5750                 if (!nr_boost_reclaim && balanced)
5751                         goto out;
5752
5753                 /* Limit the priority of boosting to avoid reclaim writeback */
5754                 if (nr_boost_reclaim && sc.priority == DEF_PRIORITY - 2)
5755                         raise_priority = false;
5756
5757                 /*
5758                  * Do not writeback or swap pages for boosted reclaim. The
5759                  * intent is to relieve pressure, not to issue sub-optimal IO
5760                  * from reclaim context. If no pages are reclaimed, the
5761                  * reclaim will be aborted.
5762                  */
5763                 sc.may_writepage = !laptop_mode && !nr_boost_reclaim;
5764                 sc.may_swap = !nr_boost_reclaim;
5765
5766                 /*
5767                  * Do some background aging, to give pages a chance to be
5768                  * referenced before reclaiming. All pages are rotated
5769                  * regardless of classzone as this is about consistent aging.
5770                  */
5771                 kswapd_age_node(pgdat, &sc);
5772
5773                 /*
5774                  * If we're having trouble reclaiming, start doing writepage
5775                  * even in laptop mode.
5776                  */
5777                 if (sc.priority < DEF_PRIORITY - 2)
5778                         sc.may_writepage = 1;
5779
5780                 /* Call soft limit reclaim before calling shrink_node. */
5781                 sc.nr_scanned = 0;
5782                 nr_soft_scanned = 0;
5783                 nr_soft_reclaimed = mem_cgroup_soft_limit_reclaim(pgdat, sc.order,
5784                                                 sc.gfp_mask, &nr_soft_scanned);
5785                 sc.nr_reclaimed += nr_soft_reclaimed;
5786
5787                 /*
5788                  * There should be no need to raise the scanning priority if
5789                  * enough pages are already being scanned that the high
5790                  * watermark would be met at 100% efficiency.
5791                  */
5792                 if (kswapd_shrink_node(pgdat, &sc))
5793                         raise_priority = false;
5794
5795                 /*
5796                  * If the low watermark is met there is no need for processes
5797                  * to be throttled on pfmemalloc_wait as they should now be
5798                  * able to safely make forward progress. Wake them.
5799                  */
5800                 if (waitqueue_active(&pgdat->pfmemalloc_wait) &&
5801                                 allow_direct_reclaim(pgdat))
5802                         wake_up_all(&pgdat->pfmemalloc_wait);
5803
5804                 /* Check if kswapd should be suspending */
5805                 __fs_reclaim_release();
5806                 ret = try_to_freeze();
5807                 __fs_reclaim_acquire();
5808                 if (ret || kthread_should_stop())
5809                         break;
5810
5811                 /*
5812                  * Raise priority if scanning rate is too low or there was no
5813                  * progress in reclaiming pages
5814                  */
5815                 nr_reclaimed = sc.nr_reclaimed - nr_reclaimed;
5816                 nr_boost_reclaim -= min(nr_boost_reclaim, nr_reclaimed);
5817
5818                 /*
5819                  * If reclaim made no progress for a boost, stop reclaim as
5820                  * IO cannot be queued and it could be an infinite loop in
5821                  * extreme circumstances.
5822                  */
5823                 if (nr_boost_reclaim && !nr_reclaimed)
5824                         break;
5825
5826                 if (raise_priority || !nr_reclaimed)
5827                         sc.priority--;
5828         } while (sc.priority >= 1);
5829
5830         if (!sc.nr_reclaimed)
5831                 pgdat->kswapd_failures++;
5832
5833 out:
5834         /* If reclaim was boosted, account for the reclaim done in this pass */
5835         if (boosted) {
5836                 unsigned long flags;
5837
5838                 for (i = 0; i <= classzone_idx; i++) {
5839                         if (!zone_boosts[i])
5840                                 continue;
5841
5842                         /* Increments are under the zone lock */
5843                         zone = pgdat->node_zones + i;
5844                         spin_lock_irqsave(&zone->lock, flags);
5845                         zone->watermark_boost -= min(zone->watermark_boost, zone_boosts[i]);
5846                         spin_unlock_irqrestore(&zone->lock, flags);
5847                 }
5848
5849                 /*
5850                  * As there is now likely space, wake up kcompactd to defragment
5851                  * pageblocks.
5852                  */
5853                 wakeup_kcompactd(pgdat, pageblock_order, classzone_idx);
5854         }
5855
5856         snapshot_refaults(NULL, pgdat);
5857         __fs_reclaim_release();
5858         psi_memstall_leave(&pflags);
5859         set_task_reclaim_state(current, NULL);
5860
5861         /*
5862          * Return the order kswapd stopped reclaiming at as
5863          * prepare_kswapd_sleep() takes it into account. If another caller
5864          * entered the allocator slow path while kswapd was awake, order will
5865          * remain at the higher level.
5866          */
5867         return sc.order;
5868 }
5869
5870 /*
5871  * The pgdat->kswapd_classzone_idx is used to pass the highest zone index to be
5872  * reclaimed by kswapd from the waker. If the value is MAX_NR_ZONES which is not
5873  * a valid index then either kswapd runs for the first time or kswapd couldn't sleep
5874  * after the previous reclaim attempt (node is still unbalanced). In that case
5875  * return the zone index of the previous kswapd reclaim cycle.
5876  */
5877 static enum zone_type kswapd_classzone_idx(pg_data_t *pgdat,
5878                                            enum zone_type prev_classzone_idx)
5879 {
5880         if (pgdat->kswapd_classzone_idx == MAX_NR_ZONES)
5881                 return prev_classzone_idx;
5882         return pgdat->kswapd_classzone_idx;
5883 }
5884
5885 static void kswapd_try_to_sleep(pg_data_t *pgdat, int alloc_order, int reclaim_order,
5886                                 unsigned int classzone_idx)
5887 {
5888         long remaining = 0;
5889         DEFINE_WAIT(wait);
5890
5891         if (freezing(current) || kthread_should_stop())
5892                 return;
5893
5894         prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE);
5895
5896         /*
5897          * Try to sleep for a short interval. Note that kcompactd will only be
5898          * woken if it is possible to sleep for a short interval. This is
5899          * deliberate on the assumption that if reclaim cannot keep an
5900          * eligible zone balanced, it's also unlikely that compaction will
5901          * succeed.
5902          */
5903         if (prepare_kswapd_sleep(pgdat, reclaim_order, classzone_idx)) {
5904                 /*
5905                  * Compaction records what page blocks it recently failed to
5906                  * isolate pages from and skips them in the future scanning.
5907                  * When kswapd is going to sleep, it is reasonable to assume
5908                  * When kswapd is going to sleep, it is reasonable to assume
5909                  * that page isolation and compaction may succeed, so reset the cache.
5910                 reset_isolation_suitable(pgdat);
5911
5912                 /*
5913                  * We have freed the memory, now we should compact it to make
5914                  * allocation of the requested order possible.
5915                  */
5916                 wakeup_kcompactd(pgdat, alloc_order, classzone_idx);
5917
5918                 remaining = schedule_timeout(HZ/10);
5919
5920                 /*
5921                  * If woken prematurely then reset kswapd_classzone_idx and
5922                  * order. The values will either be from a wakeup request or
5923                  * the previous request that slept prematurely.
5924                  */
5925                 if (remaining) {
5926                         pgdat->kswapd_classzone_idx = kswapd_classzone_idx(pgdat, classzone_idx);
5927                         pgdat->kswapd_order = max(pgdat->kswapd_order, reclaim_order);
5928                 }
5929
5930                 finish_wait(&pgdat->kswapd_wait, &wait);
5931                 prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE);
5932         }
5933
5934         /*
5935          * After a short sleep, check if it was a premature sleep. If not, then
5936          * go fully to sleep until explicitly woken up.
5937          */
5938         if (!remaining &&
5939             prepare_kswapd_sleep(pgdat, reclaim_order, classzone_idx)) {
5940                 trace_mm_vmscan_kswapd_sleep(pgdat->node_id);
5941
5942                 /*
5943                  * vmstat counters are not perfectly accurate and the estimated
5944                  * value for counters such as NR_FREE_PAGES can deviate from the
5945                  * true value by nr_online_cpus * threshold. To avoid the zone
5946                  * watermarks being breached while under pressure, we reduce the
5947                  * per-cpu vmstat threshold while kswapd is awake and restore
5948                  * them before going back to sleep.
5949                  */
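                /*
                 * Illustrative numbers (sketch): with 8 online CPUs and a
                 * per-cpu threshold of 64, NR_FREE_PAGES can be off by up to
                 * 8 * 64 = 512 pages, i.e. 2MB with 4KB pages.
                 */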
5950                 set_pgdat_percpu_threshold(pgdat, calculate_normal_threshold);
5951
5952                 if (!kthread_should_stop())
5953                         schedule();
5954
5955                 set_pgdat_percpu_threshold(pgdat, calculate_pressure_threshold);
5956         } else {
5957                 if (remaining)
5958                         count_vm_event(KSWAPD_LOW_WMARK_HIT_QUICKLY);
5959                 else
5960                         count_vm_event(KSWAPD_HIGH_WMARK_HIT_QUICKLY);
5961         }
5962         finish_wait(&pgdat->kswapd_wait, &wait);
5963 }
5964
5965 /*
5966  * The background pageout daemon, started as a kernel thread
5967  * from the init process.
5968  *
5969  * This basically trickles out pages so that we have _some_
5970  * free memory available even if there is no other activity
5971  * that frees anything up. This is needed for things like routing
5972  * etc, where we otherwise might have all activity going on in
5973  * asynchronous contexts that cannot page things out.
5974  *
5975  * If there are applications that are active memory-allocators
5976  * (most normal use), this basically shouldn't matter.
5977  */
5978 static int kswapd(void *p)
5979 {
5980         unsigned int alloc_order, reclaim_order;
5981         unsigned int classzone_idx = MAX_NR_ZONES - 1;
5982         pg_data_t *pgdat = (pg_data_t*)p;
5983         struct task_struct *tsk = current;
5984         const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id);
5985
5986         if (!cpumask_empty(cpumask))
5987                 set_cpus_allowed_ptr(tsk, cpumask);
5988
5989         /*
5990          * Tell the memory management that we're a "memory allocator",
5991          * and that if we need more memory we should get access to it
5992          * regardless (see "__alloc_pages()"). "kswapd" should
5993          * never get caught in the normal page freeing logic.
5994          *
5995          * (Kswapd normally doesn't need memory anyway, but sometimes
5996          * you need a small amount of memory in order to be able to
5997          * page out something else, and this flag essentially protects
5998          * us from recursively trying to free more memory as we're
5999          * trying to free the first piece of memory in the first place).
6000          */
6001         tsk->flags |= PF_MEMALLOC | PF_SWAPWRITE | PF_KSWAPD;
6002         set_freezable();
6003
6004         pgdat->kswapd_order = 0;
6005         pgdat->kswapd_classzone_idx = MAX_NR_ZONES;
6006         for ( ; ; ) {
6007                 bool ret;
6008
6009                 alloc_order = reclaim_order = pgdat->kswapd_order;
6010                 classzone_idx = kswapd_classzone_idx(pgdat, classzone_idx);
6011
6012 kswapd_try_sleep:
6013                 kswapd_try_to_sleep(pgdat, alloc_order, reclaim_order,
6014                                         classzone_idx);
6015
6016                 /* Read the new order and classzone_idx */
6017                 alloc_order = reclaim_order = pgdat->kswapd_order;
6018                 classzone_idx = kswapd_classzone_idx(pgdat, classzone_idx);
6019                 pgdat->kswapd_order = 0;
6020                 pgdat->kswapd_classzone_idx = MAX_NR_ZONES;
6021
6022                 ret = try_to_freeze();
6023                 if (kthread_should_stop())
6024                         break;
6025
6026                 /*
6027                  * We can speed up thawing tasks if we don't call balance_pgdat
6028                  * after returning from the refrigerator
6029                  */
6030                 if (ret)
6031                         continue;
6032
6033                 /*
6034                  * Reclaim begins at the requested order but if a high-order
6035                  * reclaim fails then kswapd falls back to reclaiming for
6036                  * order-0. If that happens, kswapd will consider sleeping
6037                  * for the order it finished reclaiming at (reclaim_order)
6038                  * but kcompactd is woken to compact for the original
6039                  * request (alloc_order).
6040                  */
6041                 trace_mm_vmscan_kswapd_wake(pgdat->node_id, classzone_idx,
6042                                                 alloc_order);
6043                 reclaim_order = balance_pgdat(pgdat, alloc_order, classzone_idx);
6044                 if (reclaim_order < alloc_order)
6045                         goto kswapd_try_sleep;
6046         }
6047
6048         tsk->flags &= ~(PF_MEMALLOC | PF_SWAPWRITE | PF_KSWAPD);
6049
6050         return 0;
6051 }
6052
6053 /*
6054  * A zone is low on free memory or too fragmented for high-order memory.  If
6055  * kswapd should reclaim (direct reclaim is deferred), wake it up for the zone's
6056  * pgdat.  It will wake up kcompactd after reclaiming memory.  If kswapd reclaim
6057  * has failed or is not needed, still wake up kcompactd if only compaction is
6058  * needed.
6059  */
6060 void wakeup_kswapd(struct zone *zone, gfp_t gfp_flags, int order,
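/*
 * Usage sketch (assumed): the main caller is the page allocator slow path,
 * e.g. wake_all_kswapds() in mm/page_alloc.c, which walks the zonelist and
 * calls wakeup_kswapd() for each eligible zone before falling back to direct
 * reclaim.
 */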
6061                    enum zone_type classzone_idx)
6062 {
6063         pg_data_t *pgdat;
6064
6065         if (!managed_zone(zone))
6066                 return;
6067
6068         if (!cpuset_zone_allowed(zone, gfp_flags))
6069                 return;
6070         pgdat = zone->zone_pgdat;
6071
6072         if (pgdat->kswapd_classzone_idx == MAX_NR_ZONES)
6073                 pgdat->kswapd_classzone_idx = classzone_idx;
6074         else
6075                 pgdat->kswapd_classzone_idx = max(pgdat->kswapd_classzone_idx,
6076                                                   classzone_idx);
6077         pgdat->kswapd_order = max(pgdat->kswapd_order, order);
6078         if (!waitqueue_active(&pgdat->kswapd_wait))
6079                 return;
6080
6081         /* Hopeless node, leave it to direct reclaim if possible */
6082         if (pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES ||
6083             (pgdat_balanced(pgdat, order, classzone_idx) &&
6084              !pgdat_watermark_boosted(pgdat, classzone_idx))) {
6085                 /*
6086                  * There may be plenty of free memory available, but it's too
6087                  * fragmented for high-order allocations.  Wake up kcompactd
6088                  * and rely on compaction_suitable() to determine if it's
6089                  * needed.  If it fails, it will defer subsequent attempts to
6090                  * ratelimit its work.
6091                  */
6092                 if (!(gfp_flags & __GFP_DIRECT_RECLAIM))
6093                         wakeup_kcompactd(pgdat, order, classzone_idx);
6094                 return;
6095         }
6096
6097         trace_mm_vmscan_wakeup_kswapd(pgdat->node_id, classzone_idx, order,
6098                                       gfp_flags);
6099         wake_up_interruptible(&pgdat->kswapd_wait);
6100 }
6101
6102 #ifdef CONFIG_HIBERNATION
6103 /*
6104  * Try to free `nr_to_reclaim' pages of memory, system-wide, and return the number of
6105  * freed pages.
6106  *
6107  * Rather than trying to age LRUs the aim is to preserve the overall
6108  * LRU order by reclaiming preferentially
6109  * inactive > active > active referenced > active mapped
6110  */
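/*
 * Usage sketch (assumed): the hibernation core is the expected caller, e.g.
 * hibernate_preallocate_memory() in kernel/power/snapshot.c uses this to free
 * enough pages for the hibernation image:
 *
 *	freed = shrink_all_memory(nr_pages_needed);	// illustrative argument name
 */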
6111 unsigned long shrink_all_memory(unsigned long nr_to_reclaim)
6112 {
6113         struct scan_control sc = {
6114                 .nr_to_reclaim = nr_to_reclaim,
6115                 .gfp_mask = GFP_HIGHUSER_MOVABLE,
6116                 .reclaim_idx = MAX_NR_ZONES - 1,
6117                 .priority = DEF_PRIORITY,
6118                 .may_writepage = 1,
6119                 .may_unmap = 1,
6120                 .may_swap = 1,
6121                 .hibernation_mode = 1,
6122         };
6123         struct zonelist *zonelist = node_zonelist(numa_node_id(), sc.gfp_mask);
6124         unsigned long nr_reclaimed;
6125         unsigned int noreclaim_flag;
6126
6127         fs_reclaim_acquire(sc.gfp_mask);
6128         noreclaim_flag = memalloc_noreclaim_save();
6129         set_task_reclaim_state(current, &sc.reclaim_state);
6130
6131         nr_reclaimed = do_try_to_free_pages(zonelist, &sc);
6132
6133         set_task_reclaim_state(current, NULL);
6134         memalloc_noreclaim_restore(noreclaim_flag);
6135         fs_reclaim_release(sc.gfp_mask);
6136
6137         return nr_reclaimed;
6138 }
6139 #endif /* CONFIG_HIBERNATION */
6140
6141 /* It's optimal to keep kswapds on the same CPUs as their memory, but
6142    not required for correctness.  So if the last cpu in a node goes
6143    away, we get changed to run anywhere: as the first one comes back,
6144    restore their cpu bindings. */
6145 static int kswapd_cpu_online(unsigned int cpu)
6146 {
6147         int nid;
6148
6149         for_each_node_state(nid, N_MEMORY) {
6150                 pg_data_t *pgdat = NODE_DATA(nid);
6151                 const struct cpumask *mask;
6152
6153                 mask = cpumask_of_node(pgdat->node_id);
6154
6155                 if (cpumask_any_and(cpu_online_mask, mask) < nr_cpu_ids)
6156                         /* One of our CPUs online: restore mask */
6157                         set_cpus_allowed_ptr(pgdat->kswapd, mask);
6158         }
6159         return 0;
6160 }
6161
6162 /*
6163  * This kswapd start function will be called by init and node-hot-add.
6164  * On node-hot-add, kswapd will be moved to the proper cpus if cpus are hot-added.
6165  */
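/*
 * Callers (assumed): kswapd_init() below at boot, and the memory hotplug
 * online path (e.g. online_pages() in mm/memory_hotplug.c) when a node gains
 * memory.
 */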
6166 int kswapd_run(int nid)
6167 {
6168         pg_data_t *pgdat = NODE_DATA(nid);
6169         int ret = 0;
6170
6171         if (pgdat->kswapd)
6172                 return 0;
6173
6174         pgdat->kswapd = kthread_run(kswapd, pgdat, "kswapd%d", nid);
6175         if (IS_ERR(pgdat->kswapd)) {
6176                 /* failure at boot is fatal */
6177                 BUG_ON(system_state < SYSTEM_RUNNING);
6178                 pr_err("Failed to start kswapd on node %d\n", nid);
6179                 ret = PTR_ERR(pgdat->kswapd);
6180                 pgdat->kswapd = NULL;
6181         }
6182         return ret;
6183 }
6184
6185 /*
6186  * Called by memory hotplug when all memory in a node is offlined.  Caller must
6187  * hold mem_hotplug_begin/end().
6188  */
6189 void kswapd_stop(int nid)
6190 {
6191         struct task_struct *kswapd = NODE_DATA(nid)->kswapd;
6192
6193         if (kswapd) {
6194                 kthread_stop(kswapd);
6195                 NODE_DATA(nid)->kswapd = NULL;
6196         }
6197 }
6198
6199 static int __init kswapd_init(void)
6200 {
6201         int nid, ret;
6202
6203         swap_setup();
6204         for_each_node_state(nid, N_MEMORY)
6205                 kswapd_run(nid);
6206         ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
6207                                         "mm/vmscan:online", kswapd_cpu_online,
6208                                         NULL);
6209         WARN_ON(ret < 0);
6210         return 0;
6211 }
6212
6213 module_init(kswapd_init)
6214
6215 #ifdef CONFIG_NUMA
6216 /*
6217  * Node reclaim mode
6218  *
6219  * If non-zero call node_reclaim when the number of free pages falls below
6220  * the watermarks.
6221  */
6222 int node_reclaim_mode __read_mostly;
6223
6224 #define RECLAIM_OFF 0
6225 #define RECLAIM_ZONE (1<<0)     /* Run shrink_inactive_list on the zone */
6226 #define RECLAIM_WRITE (1<<1)    /* Writeout pages during reclaim */
6227 #define RECLAIM_UNMAP (1<<2)    /* Unmap pages during reclaim */
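
/*
 * These bits are normally set from userspace via the vm.zone_reclaim_mode
 * sysctl (the name is kept for historical reasons), e.g.:
 *
 *	echo 1 > /proc/sys/vm/zone_reclaim_mode	# RECLAIM_ZONE only
 *	echo 5 > /proc/sys/vm/zone_reclaim_mode	# RECLAIM_ZONE | RECLAIM_UNMAP
 */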
6228
6229 /*
6230  * Priority for NODE_RECLAIM. This determines the fraction of pages
6231  * of a node considered for each zone_reclaim. 4 scans 1/16th of
6232  * a zone.
6233  */
6234 #define NODE_RECLAIM_PRIORITY 4
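
/*
 * Rough arithmetic (sketch): the per-pass scan target is roughly
 * lru_size >> priority, so priority 4 looks at about 1/16th of each LRU;
 * __node_reclaim() then walks the priority down towards 0 until enough
 * pages have been reclaimed.
 */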
6235
6236 /*
6237  * Percentage of pages in a zone that must be unmapped for node_reclaim to
6238  * occur.
6239  */
6240 int sysctl_min_unmapped_ratio = 1;
6241
6242 /*
6243  * If the number of slab pages in a zone grows beyond this percentage then
6244  * slab reclaim needs to occur.
6245  */
6246 int sysctl_min_slab_ratio = 5;
6247
6248 static inline unsigned long node_unmapped_file_pages(struct pglist_data *pgdat)
6249 {
6250         unsigned long file_mapped = node_page_state(pgdat, NR_FILE_MAPPED);
6251         unsigned long file_lru = node_page_state(pgdat, NR_INACTIVE_FILE) +
6252                 node_page_state(pgdat, NR_ACTIVE_FILE);
6253
6254         /*
6255          * It's possible for there to be more file mapped pages than
6256          * accounted for by the pages on the file LRU lists because
6257          * tmpfs pages accounted for as ANON can also be FILE_MAPPED
6258          */
6259         return (file_lru > file_mapped) ? (file_lru - file_mapped) : 0;
6260 }
6261
6262 /* Work out how many page cache pages we can reclaim in this reclaim_mode */
6263 static unsigned long node_pagecache_reclaimable(struct pglist_data *pgdat)
6264 {
6265         unsigned long nr_pagecache_reclaimable;
6266         unsigned long delta = 0;
6267
6268         /*
6269          * If RECLAIM_UNMAP is set, then all file pages are considered
6270          * potentially reclaimable. Otherwise, we have to worry about
6271          * pages like swapcache, so node_unmapped_file_pages() provides
6272          * a better estimate.
6273          */
6274         if (node_reclaim_mode & RECLAIM_UNMAP)
6275                 nr_pagecache_reclaimable = node_page_state(pgdat, NR_FILE_PAGES);
6276         else
6277                 nr_pagecache_reclaimable = node_unmapped_file_pages(pgdat);
6278
6279         /* If we can't clean pages, remove dirty pages from consideration */
6280         if (!(node_reclaim_mode & RECLAIM_WRITE))
6281                 delta += node_page_state(pgdat, NR_FILE_DIRTY);
6282
6283         /* Watch for any possible underflows due to delta */
6284         if (unlikely(delta > nr_pagecache_reclaimable))
6285                 delta = nr_pagecache_reclaimable;
6286
6287         return nr_pagecache_reclaimable - delta;
6288 }
6289
6290 /*
6291  * Try to free up some pages from this node through reclaim.
6292  */
6293 static int __node_reclaim(struct pglist_data *pgdat, gfp_t gfp_mask, unsigned int order)
6294 {
6295         /* Minimum pages needed in order to stay on node */
6296         const unsigned long nr_pages = 1 << order;
6297         struct task_struct *p = current;
6298         unsigned int noreclaim_flag;
6299         struct scan_control sc = {
6300                 .nr_to_reclaim = max(nr_pages, SWAP_CLUSTER_MAX),
6301                 .gfp_mask = current_gfp_context(gfp_mask),
6302                 .order = order,
6303                 .priority = NODE_RECLAIM_PRIORITY,
6304                 .may_writepage = !!(node_reclaim_mode & RECLAIM_WRITE),
6305                 .may_unmap = !!(node_reclaim_mode & RECLAIM_UNMAP),
6306                 .may_swap = 1,
6307                 .reclaim_idx = gfp_zone(gfp_mask),
6308         };
6309
6310         trace_mm_vmscan_node_reclaim_begin(pgdat->node_id, order,
6311                                            sc.gfp_mask);
6312
6313         cond_resched();
6314         fs_reclaim_acquire(sc.gfp_mask);
6315         /*
6316          * We need to be able to allocate from the reserves for RECLAIM_UNMAP
6317          * and we also need to be able to write out pages for RECLAIM_WRITE
6318          * and RECLAIM_UNMAP.
6319          */
6320         noreclaim_flag = memalloc_noreclaim_save();
6321         p->flags |= PF_SWAPWRITE;
6322         set_task_reclaim_state(p, &sc.reclaim_state);
6323
6324         if (node_pagecache_reclaimable(pgdat) > pgdat->min_unmapped_pages) {
6325                 /*
6326                  * Free memory by calling shrink node with increasing
6327                  * priorities until we have enough memory freed.
6328                  */
6329                 do {
6330                         shrink_node(pgdat, &sc);
6331                 } while (sc.nr_reclaimed < nr_pages && --sc.priority >= 0);
6332         }
6333
6334         set_task_reclaim_state(p, NULL);
6335         current->flags &= ~PF_SWAPWRITE;
6336         memalloc_noreclaim_restore(noreclaim_flag);
6337         fs_reclaim_release(sc.gfp_mask);
6338
6339         trace_mm_vmscan_node_reclaim_end(sc.nr_reclaimed);
6340
6341         return sc.nr_reclaimed >= nr_pages;
6342 }
6343
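/*
 * Entry point for opportunistic node-local reclaim. Called (assumed) from the
 * page allocator's zonelist walk when node_reclaim_mode is non-zero and a
 * zone on this node has fallen below its watermark; returns one of the
 * NODE_RECLAIM_* values so the allocator knows whether to recheck the
 * watermark or move on to the next zone.
 */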
6344 int node_reclaim(struct pglist_data *pgdat, gfp_t gfp_mask, unsigned int order)
6345 {
6346         int ret;
6347
6348         /*
6349          * Node reclaim reclaims unmapped file backed pages and
6350          * slab pages if we are over the defined limits.
6351          *
6352          * A small portion of unmapped file backed pages is needed for
6353          * file I/O otherwise pages read by file I/O will be immediately
6354          * thrown out if the node is overallocated. So we do not reclaim
6355          * if less than a specified percentage of the node is used by
6356          * unmapped file backed pages.
6357          */
6358         if (node_pagecache_reclaimable(pgdat) <= pgdat->min_unmapped_pages &&
6359             node_page_state(pgdat, NR_SLAB_RECLAIMABLE) <= pgdat->min_slab_pages)
6360                 return NODE_RECLAIM_FULL;
6361
6362         /*
6363          * Do not scan if the allocation should not be delayed.
6364          */
6365         if (!gfpflags_allow_blocking(gfp_mask) || (current->flags & PF_MEMALLOC))
6366                 return NODE_RECLAIM_NOSCAN;
6367
6368         /*
6369          * Only run node reclaim on the local node or on nodes that do not
6370          * have associated processors. This will favor the local processor
6371          * over remote processors and spread off node memory allocations
6372          * as widely as possible.
6373          */
6374         if (node_state(pgdat->node_id, N_CPU) && pgdat->node_id != numa_node_id())
6375                 return NODE_RECLAIM_NOSCAN;
6376
6377         if (test_and_set_bit(PGDAT_RECLAIM_LOCKED, &pgdat->flags))
6378                 return NODE_RECLAIM_NOSCAN;
6379
6380         ret = __node_reclaim(pgdat, gfp_mask, order);
6381         clear_bit(PGDAT_RECLAIM_LOCKED, &pgdat->flags);
6382
6383         if (!ret)
6384                 count_vm_event(PGSCAN_ZONE_RECLAIM_FAILED);
6385
6386         return ret;
6387 }
6388 #endif
6389
6390 /*
6391  * page_evictable - test whether a page is evictable
6392  * @page: the page to test
6393  *
6394  * Test whether page is evictable--i.e., should be placed on active/inactive
6395  * lists vs unevictable list.
6396  *
6397  * Reasons page might not be evictable:
6398  * (1) page's mapping marked unevictable
6399  * (2) page is part of an mlocked VMA
6400  *
6401  */
6402 int page_evictable(struct page *page)
6403 {
6404         int ret;
6405
6406         /* Prevent address_space of inode and swap cache from being freed */
6407         rcu_read_lock();
6408         ret = !mapping_unevictable(page_mapping(page)) && !PageMlocked(page);
6409         rcu_read_unlock();
6410         return ret;
6411 }
6412
6413 /**
6414  * check_move_unevictable_pages - check pages for evictability and move to
6415  * appropriate zone lru list
6416  * @pvec: pagevec with lru pages to check
6417  *
6418  * Checks pages for evictability; if an evictable page is on the unevictable
6419  * lru list, it is moved to the appropriate evictable lru list. This function
6420  * should be only used for lru pages.
6421  */
6422 void check_move_unevictable_pages(struct pagevec *pvec)
6423 {
6424         struct lruvec *lruvec;
6425         struct pglist_data *pgdat = NULL;
6426         int pgscanned = 0;
6427         int pgrescued = 0;
6428         int i;
6429
6430         for (i = 0; i < pvec->nr; i++) {
6431                 struct page *page = pvec->pages[i];
6432                 struct pglist_data *pagepgdat = page_pgdat(page);
6433
6434                 pgscanned++;
6435
6436                 if (!TestClearPageLRU(page))
6437                         continue;
6438
6439                 if (pagepgdat != pgdat) {
6440                         if (pgdat)
6441                                 spin_unlock_irq(&pgdat->lru_lock);
6442                         pgdat = pagepgdat;
6443                         spin_lock_irq(&pgdat->lru_lock);
6444                 }
6445                 lruvec = mem_cgroup_page_lruvec(page, pgdat);
6446
6447                 if (page_evictable(page) && PageUnevictable(page)) {
6448                         enum lru_list lru = page_lru_base_type(page);
6449
6450                         VM_BUG_ON_PAGE(PageActive(page), page);
6451                         ClearPageUnevictable(page);
6452                         del_page_from_lru_list(page, lruvec, LRU_UNEVICTABLE);
6453                         add_page_to_lru_list(page, lruvec, lru);
6454                         pgrescued++;
6455                 }
6456                 SetPageLRU(page);
6457         }
6458
6459         if (pgdat) {
6460                 __count_vm_events(UNEVICTABLE_PGRESCUED, pgrescued);
6461                 __count_vm_events(UNEVICTABLE_PGSCANNED, pgscanned);
6462                 spin_unlock_irq(&pgdat->lru_lock);
6463         } else if (pgscanned) {
6464                 count_vm_events(UNEVICTABLE_PGSCANNED, pgscanned);
6465         }
6466 }
6467 EXPORT_SYMBOL_GPL(check_move_unevictable_pages);