platform/kernel/linux-rpi.git: mm/vmscan.c @ commit 5f802340fe59c08f6472b9ce557fda2d6fea4952
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  *  linux/mm/vmscan.c
4  *
5  *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
6  *
7  *  Swap reorganised 29.12.95, Stephen Tweedie.
8  *  kswapd added: 7.1.96  sct
9  *  Removed kswapd_ctl limits, and swap out as many pages as needed
10  *  to bring the system back to freepages.high: 2.4.97, Rik van Riel.
11  *  Zone aware kswapd started 02/00, Kanoj Sarcar (kanoj@sgi.com).
12  *  Multiqueue VM started 5.8.00, Rik van Riel.
13  */
14
15 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
16
17 #include <linux/mm.h>
18 #include <linux/sched/mm.h>
19 #include <linux/module.h>
20 #include <linux/gfp.h>
21 #include <linux/kernel_stat.h>
22 #include <linux/swap.h>
23 #include <linux/pagemap.h>
24 #include <linux/init.h>
25 #include <linux/highmem.h>
26 #include <linux/vmpressure.h>
27 #include <linux/vmstat.h>
28 #include <linux/file.h>
29 #include <linux/writeback.h>
30 #include <linux/blkdev.h>
31 #include <linux/buffer_head.h>  /* for try_to_release_page(),
32                                         buffer_heads_over_limit */
33 #include <linux/mm_inline.h>
34 #include <linux/backing-dev.h>
35 #include <linux/rmap.h>
36 #include <linux/topology.h>
37 #include <linux/cpu.h>
38 #include <linux/cpuset.h>
39 #include <linux/compaction.h>
40 #include <linux/notifier.h>
41 #include <linux/rwsem.h>
42 #include <linux/delay.h>
43 #include <linux/kthread.h>
44 #include <linux/freezer.h>
45 #include <linux/memcontrol.h>
46 #include <linux/delayacct.h>
47 #include <linux/sysctl.h>
48 #include <linux/oom.h>
49 #include <linux/pagevec.h>
50 #include <linux/prefetch.h>
51 #include <linux/printk.h>
52 #include <linux/dax.h>
53 #include <linux/psi.h>
54 #include <linux/pagewalk.h>
55 #include <linux/shmem_fs.h>
56 #include <linux/ctype.h>
57 #include <linux/debugfs.h>
58
59 #include <asm/tlbflush.h>
60 #include <asm/div64.h>
61
62 #include <linux/swapops.h>
63 #include <linux/balloon_compaction.h>
64
65 #include "internal.h"
66
67 #define CREATE_TRACE_POINTS
68 #include <trace/events/vmscan.h>
69
70 struct scan_control {
71         /* How many pages shrink_list() should reclaim */
72         unsigned long nr_to_reclaim;
73
74         /*
75          * Nodemask of nodes allowed by the caller. If NULL, all nodes
76          * are scanned.
77          */
78         nodemask_t      *nodemask;
79
80         /*
81          * The memory cgroup that hit its limit and as a result is the
82          * primary target of this reclaim invocation.
83          */
84         struct mem_cgroup *target_mem_cgroup;
85
86         /* Can active pages be deactivated as part of reclaim? */
87 #define DEACTIVATE_ANON 1
88 #define DEACTIVATE_FILE 2
89         unsigned int may_deactivate:2;
90         unsigned int force_deactivate:1;
91         unsigned int skipped_deactivate:1;
92
93         /* Writepage batching in laptop mode; RECLAIM_WRITE */
94         unsigned int may_writepage:1;
95
96         /* Can mapped pages be reclaimed? */
97         unsigned int may_unmap:1;
98
99         /* Can pages be swapped as part of reclaim? */
100         unsigned int may_swap:1;
101
102         /*
103          * Cgroups are not reclaimed below their configured memory.low,
104          * unless we threaten to OOM. If any cgroups are skipped due to
105          * memory.low and nothing was reclaimed, go back for memory.low.
106          */
107         unsigned int memcg_low_reclaim:1;
108         unsigned int memcg_low_skipped:1;
109
110         unsigned int hibernation_mode:1;
111
112         /* One of the zones is ready for compaction */
113         unsigned int compaction_ready:1;
114
115         /* There is easily reclaimable cold cache in the current node */
116         unsigned int cache_trim_mode:1;
117
118         /* The file pages on the current node are dangerously low */
119         unsigned int file_is_tiny:1;
120
121 #ifdef CONFIG_LRU_GEN
122         /* help kswapd make better choices among multiple memcgs */
123         unsigned int memcgs_need_aging:1;
124         unsigned long last_reclaimed;
125 #endif
126
127         /* Allocation order */
128         s8 order;
129
130         /* Scan (total_size >> priority) pages at once */
131         s8 priority;
132
133         /* The highest zone to isolate pages for reclaim from */
134         s8 reclaim_idx;
135
136         /* This context's GFP mask */
137         gfp_t gfp_mask;
138
139         /* Incremented by the number of inactive pages that were scanned */
140         unsigned long nr_scanned;
141
142         /* Number of pages freed so far during a call to shrink_zones() */
143         unsigned long nr_reclaimed;
144
145         struct {
146                 unsigned int dirty;
147                 unsigned int unqueued_dirty;
148                 unsigned int congested;
149                 unsigned int writeback;
150                 unsigned int immediate;
151                 unsigned int file_taken;
152                 unsigned int taken;
153         } nr;
154
155         /* for recording the amount of slab reclaimed so far */
156         struct reclaim_state reclaim_state;
157 };
158
159 #ifdef ARCH_HAS_PREFETCH
160 #define prefetch_prev_lru_page(_page, _base, _field)                    \
161         do {                                                            \
162                 if ((_page)->lru.prev != _base) {                       \
163                         struct page *prev;                              \
164                                                                         \
165                         prev = lru_to_page(&(_page->lru));              \
166                         prefetch(&prev->_field);                        \
167                 }                                                       \
168         } while (0)
169 #else
170 #define prefetch_prev_lru_page(_page, _base, _field) do { } while (0)
171 #endif
172
173 #ifdef ARCH_HAS_PREFETCHW
174 #define prefetchw_prev_lru_page(_page, _base, _field)                   \
175         do {                                                            \
176                 if ((_page)->lru.prev != _base) {                       \
177                         struct page *prev;                              \
178                                                                         \
179                         prev = lru_to_page(&(_page->lru));              \
180                         prefetchw(&prev->_field);                       \
181                 }                                                       \
182         } while (0)
183 #else
184 #define prefetchw_prev_lru_page(_page, _base, _field) do { } while (0)
185 #endif
186
187 /*
188  * From 0 .. 100.  Higher means more swappy.
189  */
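/* Tunable at run time via the vm.swappiness sysctl (/proc/sys/vm/swappiness). */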
190 int vm_swappiness = 60;
191 /*
192  * The total number of pages which are beyond the high watermark within all
193  * zones.
194  */
195 unsigned long vm_total_pages;
196
197 static void set_task_reclaim_state(struct task_struct *task,
198                                    struct reclaim_state *rs)
199 {
200         /* Check for an overwrite */
201         WARN_ON_ONCE(rs && task->reclaim_state);
202
203         /* Check for the nulling of an already-nulled member */
204         WARN_ON_ONCE(!rs && !task->reclaim_state);
205
206         task->reclaim_state = rs;
207 }
208
209 static LIST_HEAD(shrinker_list);
210 static DECLARE_RWSEM(shrinker_rwsem);
211
212 #ifdef CONFIG_MEMCG
213 /*
214  * We allow subsystems to populate their shrinker-related
215  * LRU lists before register_shrinker_prepared() is called
216  * for the shrinker, since we don't want to impose
217  * restrictions on their internal registration order.
218  * In this case shrink_slab_memcg() may find the corresponding
219  * bit set in the shrinker map.
220  *
221  * This value is used by the function to detect shrinkers that are
222  * still registering and to skip do_shrink_slab() calls for them.
223  */
224 #define SHRINKER_REGISTERING ((struct shrinker *)~0UL)
225
226 static DEFINE_IDR(shrinker_idr);
227 static int shrinker_nr_max;
228
229 static int prealloc_memcg_shrinker(struct shrinker *shrinker)
230 {
231         int id, ret = -ENOMEM;
232
233         down_write(&shrinker_rwsem);
234         /* This may call shrinker, so it must use down_read_trylock() */
235         id = idr_alloc(&shrinker_idr, SHRINKER_REGISTERING, 0, 0, GFP_KERNEL);
236         if (id < 0)
237                 goto unlock;
238
239         if (id >= shrinker_nr_max) {
240                 if (memcg_expand_shrinker_maps(id)) {
241                         idr_remove(&shrinker_idr, id);
242                         goto unlock;
243                 }
244
245                 shrinker_nr_max = id + 1;
246         }
247         shrinker->id = id;
248         ret = 0;
249 unlock:
250         up_write(&shrinker_rwsem);
251         return ret;
252 }
253
254 static void unregister_memcg_shrinker(struct shrinker *shrinker)
255 {
256         int id = shrinker->id;
257
258         BUG_ON(id < 0);
259
260         down_write(&shrinker_rwsem);
261         idr_remove(&shrinker_idr, id);
262         up_write(&shrinker_rwsem);
263 }
264
265 static bool cgroup_reclaim(struct scan_control *sc)
266 {
267         return sc->target_mem_cgroup;
268 }
269
270 /**
271  * writeback_throttling_sane - is the usual dirty throttling mechanism available?
272  * @sc: scan_control in question
273  *
274  * The normal page dirty throttling mechanism in balance_dirty_pages() is
275  * completely broken with the legacy memcg and direct stalling in
276  * shrink_page_list() is used for throttling instead, which lacks all the
277  * niceties such as fairness, adaptive pausing, bandwidth proportional
278  * allocation and configurability.
279  *
280  * This function tests whether the vmscan currently in progress can assume
281  * that the normal dirty throttling mechanism is operational.
282  */
283 static bool writeback_throttling_sane(struct scan_control *sc)
284 {
285         if (!cgroup_reclaim(sc))
286                 return true;
287 #ifdef CONFIG_CGROUP_WRITEBACK
288         if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
289                 return true;
290 #endif
291         return false;
292 }
293 #else
294 static int prealloc_memcg_shrinker(struct shrinker *shrinker)
295 {
296         return 0;
297 }
298
299 static void unregister_memcg_shrinker(struct shrinker *shrinker)
300 {
301 }
302
303 static bool cgroup_reclaim(struct scan_control *sc)
304 {
305         return false;
306 }
307
308 static bool writeback_throttling_sane(struct scan_control *sc)
309 {
310         return true;
311 }
312 #endif
313
314 /*
315  * This misses isolated pages, which are not accounted for in order to save counters.
316  * As the data only determines if reclaim or compaction continues, it is
317  * not expected that isolated pages will be a dominating factor.
318  */
319 unsigned long zone_reclaimable_pages(struct zone *zone)
320 {
321         unsigned long nr;
322
323         nr = zone_page_state_snapshot(zone, NR_ZONE_INACTIVE_FILE) +
324                 zone_page_state_snapshot(zone, NR_ZONE_ACTIVE_FILE);
325         if (get_nr_swap_pages() > 0)
326                 nr += zone_page_state_snapshot(zone, NR_ZONE_INACTIVE_ANON) +
327                         zone_page_state_snapshot(zone, NR_ZONE_ACTIVE_ANON);
328
329         return nr;
330 }
331
332 /**
333  * lruvec_lru_size -  Returns the number of pages on the given LRU list.
334  * @lruvec: lru vector
335  * @lru: lru to use
336  * @zone_idx: zones to consider (use MAX_NR_ZONES for the whole LRU list)
337  */
338 unsigned long lruvec_lru_size(struct lruvec *lruvec, enum lru_list lru, int zone_idx)
339 {
340         unsigned long size = 0;
341         int zid;
342
343         for (zid = 0; zid <= zone_idx && zid < MAX_NR_ZONES; zid++) {
344                 struct zone *zone = &lruvec_pgdat(lruvec)->node_zones[zid];
345
346                 if (!managed_zone(zone))
347                         continue;
348
349                 if (!mem_cgroup_disabled())
350                         size += mem_cgroup_get_zone_lru_size(lruvec, lru, zid);
351                 else
352                         size += zone_page_state(zone, NR_ZONE_LRU_BASE + lru);
353         }
354         return size;
355 }
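/*
 * Usage note (illustrative, not from the original source): callers that want
 * the size of the whole list pass MAX_NR_ZONES as @zone_idx, e.g.
 * lruvec_lru_size(lruvec, LRU_INACTIVE_FILE, MAX_NR_ZONES), while reclaim
 * passes sc->reclaim_idx so that only the zones it is allowed to use are
 * counted.
 */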
356
357 /*
358  * Add a shrinker callback to be called from the vm.
359  */
360 int prealloc_shrinker(struct shrinker *shrinker)
361 {
362         unsigned int size = sizeof(*shrinker->nr_deferred);
363
364         if (shrinker->flags & SHRINKER_NUMA_AWARE)
365                 size *= nr_node_ids;
366
367         shrinker->nr_deferred = kzalloc(size, GFP_KERNEL);
368         if (!shrinker->nr_deferred)
369                 return -ENOMEM;
370
371         if (shrinker->flags & SHRINKER_MEMCG_AWARE) {
372                 if (prealloc_memcg_shrinker(shrinker))
373                         goto free_deferred;
374         }
375
376         return 0;
377
378 free_deferred:
379         kfree(shrinker->nr_deferred);
380         shrinker->nr_deferred = NULL;
381         return -ENOMEM;
382 }
383
384 void free_prealloced_shrinker(struct shrinker *shrinker)
385 {
386         if (!shrinker->nr_deferred)
387                 return;
388
389         if (shrinker->flags & SHRINKER_MEMCG_AWARE)
390                 unregister_memcg_shrinker(shrinker);
391
392         kfree(shrinker->nr_deferred);
393         shrinker->nr_deferred = NULL;
394 }
395
396 void register_shrinker_prepared(struct shrinker *shrinker)
397 {
398         down_write(&shrinker_rwsem);
399         list_add_tail(&shrinker->list, &shrinker_list);
400 #ifdef CONFIG_MEMCG
401         if (shrinker->flags & SHRINKER_MEMCG_AWARE)
402                 idr_replace(&shrinker_idr, shrinker, shrinker->id);
403 #endif
404         up_write(&shrinker_rwsem);
405 }
406
407 int register_shrinker(struct shrinker *shrinker)
408 {
409         int err = prealloc_shrinker(shrinker);
410
411         if (err)
412                 return err;
413         register_shrinker_prepared(shrinker);
414         return 0;
415 }
416 EXPORT_SYMBOL(register_shrinker);
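/*
 * Usage sketch (illustrative only, not part of this file; the demo_* names
 * are hypothetical helpers):
 *
 *	static unsigned long demo_count(struct shrinker *s,
 *					struct shrink_control *sc)
 *	{
 *		return demo_nr_cached_objects();  // 0 means "nothing to do"
 *	}
 *
 *	static unsigned long demo_scan(struct shrinker *s,
 *				       struct shrink_control *sc)
 *	{
 *		return demo_evict(sc->nr_to_scan);  // or SHRINK_STOP to abort
 *	}
 *
 *	static struct shrinker demo_shrinker = {
 *		.count_objects	= demo_count,
 *		.scan_objects	= demo_scan,
 *		.seeks		= DEFAULT_SEEKS,
 *	};
 *
 * The shrinker is registered with register_shrinker(&demo_shrinker) and torn
 * down again with unregister_shrinker(&demo_shrinker).
 */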
417
418 /*
419  * Remove one
420  */
421 void unregister_shrinker(struct shrinker *shrinker)
422 {
423         if (!shrinker->nr_deferred)
424                 return;
425         if (shrinker->flags & SHRINKER_MEMCG_AWARE)
426                 unregister_memcg_shrinker(shrinker);
427         down_write(&shrinker_rwsem);
428         list_del(&shrinker->list);
429         up_write(&shrinker_rwsem);
430         kfree(shrinker->nr_deferred);
431         shrinker->nr_deferred = NULL;
432 }
433 EXPORT_SYMBOL(unregister_shrinker);
434
435 #define SHRINK_BATCH 128
436
437 static unsigned long do_shrink_slab(struct shrink_control *shrinkctl,
438                                     struct shrinker *shrinker, int priority)
439 {
440         unsigned long freed = 0;
441         unsigned long long delta;
442         long total_scan;
443         long freeable;
444         long nr;
445         long new_nr;
446         int nid = shrinkctl->nid;
447         long batch_size = shrinker->batch ? shrinker->batch
448                                           : SHRINK_BATCH;
449         long scanned = 0, next_deferred;
450
451         if (!(shrinker->flags & SHRINKER_NUMA_AWARE))
452                 nid = 0;
453
454         freeable = shrinker->count_objects(shrinker, shrinkctl);
455         if (freeable == 0 || freeable == SHRINK_EMPTY)
456                 return freeable;
457
458         /*
459          * copy the current shrinker scan count into a local variable
460          * and zero it so that other concurrent shrinker invocations
461          * don't also do this scanning work.
462          */
463         nr = atomic_long_xchg(&shrinker->nr_deferred[nid], 0);
464
465         total_scan = nr;
466         if (shrinker->seeks) {
467                 delta = freeable >> priority;
468                 delta *= 4;
469                 do_div(delta, shrinker->seeks);
470         } else {
471                 /*
472                  * These objects don't require any IO to create. Trim
473                  * them aggressively under memory pressure to keep
474                  * them from causing refetches in the IO caches.
475                  */
476                 delta = freeable / 2;
477         }
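        /*
         * Worked example (illustrative only): with the default reclaim
         * priority DEF_PRIORITY (12), shrinker->seeks == DEFAULT_SEEKS (2)
         * and freeable == 8192 objects, the branch above computes
         * delta = (8192 >> 12) * 4 / 2 = 4 objects added to the scan target.
         */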
478
479         total_scan += delta;
480         if (total_scan < 0) {
481                 pr_err("shrink_slab: %pS negative objects to delete nr=%ld\n",
482                        shrinker->scan_objects, total_scan);
483                 total_scan = freeable;
484                 next_deferred = nr;
485         } else
486                 next_deferred = total_scan;
487
488         /*
489          * We need to avoid excessive windup on filesystem shrinkers
490          * due to large numbers of GFP_NOFS allocations causing the
491          * shrinkers to return -1 all the time. This results in a large
492          * nr being built up so when a shrink that can do some work
493          * comes along it empties the entire cache due to nr >>>
494          * freeable. This is bad for sustaining a working set in
495          * memory.
496          *
497          * Hence only allow the shrinker to scan the entire cache when
498          * a large delta change is calculated directly.
499          */
500         if (delta < freeable / 4)
501                 total_scan = min(total_scan, freeable / 2);
502
503         /*
504          * Avoid the risk of looping forever due to a too-large nr value:
505          * never try to free more than twice the estimated number of
506          * freeable entries.
507          */
508         if (total_scan > freeable * 2)
509                 total_scan = freeable * 2;
510
511         trace_mm_shrink_slab_start(shrinker, shrinkctl, nr,
512                                    freeable, delta, total_scan, priority);
513
514         /*
515          * Normally, we should not scan less than batch_size objects in one
516          * pass to avoid too frequent shrinker calls, but if the slab has fewer
517          * than batch_size objects in total and we are really tight on memory,
518          * we will try to reclaim all available objects, otherwise we can end
519          * up failing allocations although there are plenty of reclaimable
520          * objects spread over several slabs with usage less than the
521          * batch_size.
522          *
523          * We detect the "tight on memory" situations by looking at the total
524          * number of objects we want to scan (total_scan). If it is greater
525          * than the total number of objects on slab (freeable), we must be
526          * scanning at high prio and therefore should try to reclaim as much as
527          * scanning at high priority and therefore should try to reclaim as much as
528          */
529         while (total_scan >= batch_size ||
530                total_scan >= freeable) {
531                 unsigned long ret;
532                 unsigned long nr_to_scan = min(batch_size, total_scan);
533
534                 shrinkctl->nr_to_scan = nr_to_scan;
535                 shrinkctl->nr_scanned = nr_to_scan;
536                 ret = shrinker->scan_objects(shrinker, shrinkctl);
537                 if (ret == SHRINK_STOP)
538                         break;
539                 freed += ret;
540
541                 count_vm_events(SLABS_SCANNED, shrinkctl->nr_scanned);
542                 total_scan -= shrinkctl->nr_scanned;
543                 scanned += shrinkctl->nr_scanned;
544
545                 cond_resched();
546         }
547
548         if (next_deferred >= scanned)
549                 next_deferred -= scanned;
550         else
551                 next_deferred = 0;
552         /*
553          * move the unused scan count back into the shrinker in a
554          * manner that handles concurrent updates. If we exhausted the
555          * scan, there is no need to do an update.
556          */
557         if (next_deferred > 0)
558                 new_nr = atomic_long_add_return(next_deferred,
559                                                 &shrinker->nr_deferred[nid]);
560         else
561                 new_nr = atomic_long_read(&shrinker->nr_deferred[nid]);
562
563         trace_mm_shrink_slab_end(shrinker, nid, freed, nr, new_nr, total_scan);
564         return freed;
565 }
566
567 #ifdef CONFIG_MEMCG
568 static unsigned long shrink_slab_memcg(gfp_t gfp_mask, int nid,
569                         struct mem_cgroup *memcg, int priority)
570 {
571         struct memcg_shrinker_map *map;
572         unsigned long ret, freed = 0;
573         int i;
574
575         if (!mem_cgroup_online(memcg))
576                 return 0;
577
578         if (!down_read_trylock(&shrinker_rwsem))
579                 return 0;
580
581         map = rcu_dereference_protected(memcg->nodeinfo[nid]->shrinker_map,
582                                         true);
583         if (unlikely(!map))
584                 goto unlock;
585
586         for_each_set_bit(i, map->map, shrinker_nr_max) {
587                 struct shrink_control sc = {
588                         .gfp_mask = gfp_mask,
589                         .nid = nid,
590                         .memcg = memcg,
591                 };
592                 struct shrinker *shrinker;
593
594                 shrinker = idr_find(&shrinker_idr, i);
595                 if (unlikely(!shrinker || shrinker == SHRINKER_REGISTERING)) {
596                         if (!shrinker)
597                                 clear_bit(i, map->map);
598                         continue;
599                 }
600
601                 /* Call non-slab shrinkers even though kmem is disabled */
602                 if (!memcg_kmem_enabled() &&
603                     !(shrinker->flags & SHRINKER_NONSLAB))
604                         continue;
605
606                 ret = do_shrink_slab(&sc, shrinker, priority);
607                 if (ret == SHRINK_EMPTY) {
608                         clear_bit(i, map->map);
609                         /*
610                          * After the shrinker reported that it had no objects to
611                          * free, but before we cleared the corresponding bit in
612                          * the memcg shrinker map, a new object might have been
613                          * added. To make sure we have the bit set in this
614                          * case, we invoke the shrinker one more time and re-set
615                          * the bit if it reports that it is not empty anymore.
616                          * The memory barrier here pairs with the barrier in
617                          * memcg_set_shrinker_bit():
618                          *
619                          * list_lru_add()     shrink_slab_memcg()
620                          *   list_add_tail()    clear_bit()
621                          *   <MB>               <MB>
622                          *   set_bit()          do_shrink_slab()
623                          */
624                         smp_mb__after_atomic();
625                         ret = do_shrink_slab(&sc, shrinker, priority);
626                         if (ret == SHRINK_EMPTY)
627                                 ret = 0;
628                         else
629                                 memcg_set_shrinker_bit(memcg, nid, i);
630                 }
631                 freed += ret;
632
633                 if (rwsem_is_contended(&shrinker_rwsem)) {
634                         freed = freed ? : 1;
635                         break;
636                 }
637         }
638 unlock:
639         up_read(&shrinker_rwsem);
640         return freed;
641 }
642 #else /* CONFIG_MEMCG */
643 static unsigned long shrink_slab_memcg(gfp_t gfp_mask, int nid,
644                         struct mem_cgroup *memcg, int priority)
645 {
646         return 0;
647 }
648 #endif /* CONFIG_MEMCG */
649
650 /**
651  * shrink_slab - shrink slab caches
652  * @gfp_mask: allocation context
653  * @nid: node whose slab caches to target
654  * @memcg: memory cgroup whose slab caches to target
655  * @priority: the reclaim priority
656  *
657  * Call the shrink functions to age shrinkable caches.
658  *
659  * @nid is passed along to shrinkers with SHRINKER_NUMA_AWARE set;
660  * unaware shrinkers will receive a node id of 0 instead.
661  *
662  * @memcg specifies the memory cgroup to target. Unaware shrinkers
663  * are called only if it is the root cgroup.
664  *
665  * @priority is sc->priority; we take the number of objects and >> by priority
666  * in order to get the scan target.
667  *
668  * Returns the number of reclaimed slab objects.
669  */
670 static unsigned long shrink_slab(gfp_t gfp_mask, int nid,
671                                  struct mem_cgroup *memcg,
672                                  int priority)
673 {
674         unsigned long ret, freed = 0;
675         struct shrinker *shrinker;
676
677         /*
678          * The root memcg might be allocated even though memcg is disabled
679          * via the "cgroup_disable=memory" boot parameter.  This could make
680          * mem_cgroup_is_root() return false, so that we would run only the
681          * memcg slab shrink and skip the global shrink, which may result in
682          * a premature OOM.
683          */
684         if (!mem_cgroup_disabled() && !mem_cgroup_is_root(memcg))
685                 return shrink_slab_memcg(gfp_mask, nid, memcg, priority);
686
687         if (!down_read_trylock(&shrinker_rwsem))
688                 goto out;
689
690         list_for_each_entry(shrinker, &shrinker_list, list) {
691                 struct shrink_control sc = {
692                         .gfp_mask = gfp_mask,
693                         .nid = nid,
694                         .memcg = memcg,
695                 };
696
697                 ret = do_shrink_slab(&sc, shrinker, priority);
698                 if (ret == SHRINK_EMPTY)
699                         ret = 0;
700                 freed += ret;
701                 /*
702                  * Bail out if someone wants to register a new shrinker, to
703                  * prevent the registration from being stalled for long periods
704                  * by parallel ongoing shrinking.
705                  */
706                 if (rwsem_is_contended(&shrinker_rwsem)) {
707                         freed = freed ? : 1;
708                         break;
709                 }
710         }
711
712         up_read(&shrinker_rwsem);
713 out:
714         cond_resched();
715         return freed;
716 }
717
718 void drop_slab_node(int nid)
719 {
720         unsigned long freed;
721
722         do {
723                 struct mem_cgroup *memcg = NULL;
724
725                 freed = 0;
726                 memcg = mem_cgroup_iter(NULL, NULL, NULL);
727                 do {
728                         freed += shrink_slab(GFP_KERNEL, nid, memcg, 0);
729                 } while ((memcg = mem_cgroup_iter(NULL, memcg, NULL)) != NULL);
730         } while (freed > 10);
731 }
732
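/*
 * Note: writing 2 or 3 to /proc/sys/vm/drop_caches ends up calling
 * drop_slab() below, which walks every online node via drop_slab_node().
 */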
733 void drop_slab(void)
734 {
735         int nid;
736
737         for_each_online_node(nid)
738                 drop_slab_node(nid);
739 }
740
741 static inline int is_page_cache_freeable(struct page *page)
742 {
743         /*
744          * A freeable page cache page is referenced only by the caller
745          * that isolated the page, the page cache and, optionally, the
746          * buffer heads at page->private.
747          */
748         int page_cache_pins = PageTransHuge(page) && PageSwapCache(page) ?
749                 HPAGE_PMD_NR : 1;
750         return page_count(page) - page_has_private(page) == 1 + page_cache_pins;
751 }
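/*
 * Worked example (illustrative only): a base page that we isolated, that is
 * still in the page cache and that has buffer heads attached holds three
 * references (isolation + page cache + buffers), so the check above reads
 * 3 - 1 == 1 + 1 and the page is considered freeable.
 */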
752
753 static int may_write_to_inode(struct inode *inode, struct scan_control *sc)
754 {
755         if (current->flags & PF_SWAPWRITE)
756                 return 1;
757         if (!inode_write_congested(inode))
758                 return 1;
759         if (inode_to_bdi(inode) == current->backing_dev_info)
760                 return 1;
761         return 0;
762 }
763
764 /*
765  * We detected a synchronous write error writing a page out.  Probably
766  * -ENOSPC.  We need to propagate that into the address_space for a subsequent
767  * fsync(), msync() or close().
768  *
769  * The tricky part is that after writepage we cannot touch the mapping: nothing
770  * prevents it from being freed up.  But we have a ref on the page and once
771  * that page is locked, the mapping is pinned.
772  *
773  * We're allowed to run sleeping lock_page() here because we know the caller has
774  * __GFP_FS.
775  */
776 static void handle_write_error(struct address_space *mapping,
777                                 struct page *page, int error)
778 {
779         lock_page(page);
780         if (page_mapping(page) == mapping)
781                 mapping_set_error(mapping, error);
782         unlock_page(page);
783 }
784
785 /* possible outcome of pageout() */
786 typedef enum {
787         /* failed to write page out, page is locked */
788         PAGE_KEEP,
789         /* move page to the active list, page is locked */
790         PAGE_ACTIVATE,
791         /* page has been sent to the disk successfully, page is unlocked */
792         PAGE_SUCCESS,
793         /* page is clean and locked */
794         PAGE_CLEAN,
795 } pageout_t;
796
797 /*
798  * pageout is called by shrink_page_list() for each dirty page.
799  * Calls ->writepage().
800  */
801 static pageout_t pageout(struct page *page, struct address_space *mapping,
802                          struct scan_control *sc)
803 {
804         /*
805          * If the page is dirty, only perform writeback if that write
806          * will be non-blocking, to prevent this allocation from being
807          * stalled by pagecache activity.  But note that there may be
808          * stalls if we need to run get_block().  We could test
809          * PagePrivate for that.
810          *
811          * If this process is currently in __generic_file_write_iter() against
812          * this page's queue, we can perform writeback even if that
813          * will block.
814          *
815          * If the page is swapcache, write it back even if that would
816          * block, for some throttling. This happens by accident, because
817          * swap_backing_dev_info is bust: it doesn't reflect the
818          * congestion state of the swapdevs.  Easy to fix, if needed.
819          */
820         if (!is_page_cache_freeable(page))
821                 return PAGE_KEEP;
822         if (!mapping) {
823                 /*
824                  * Some data journaling orphaned pages can have
825                  * page->mapping == NULL while being dirty with clean buffers.
826                  */
827                 if (page_has_private(page)) {
828                         if (try_to_free_buffers(page)) {
829                                 ClearPageDirty(page);
830                                 pr_info("%s: orphaned page\n", __func__);
831                                 return PAGE_CLEAN;
832                         }
833                 }
834                 return PAGE_KEEP;
835         }
836         if (mapping->a_ops->writepage == NULL)
837                 return PAGE_ACTIVATE;
838         if (!may_write_to_inode(mapping->host, sc))
839                 return PAGE_KEEP;
840
841         if (clear_page_dirty_for_io(page)) {
842                 int res;
843                 struct writeback_control wbc = {
844                         .sync_mode = WB_SYNC_NONE,
845                         .nr_to_write = SWAP_CLUSTER_MAX,
846                         .range_start = 0,
847                         .range_end = LLONG_MAX,
848                         .for_reclaim = 1,
849                 };
850
851                 SetPageReclaim(page);
852                 res = mapping->a_ops->writepage(page, &wbc);
853                 if (res < 0)
854                         handle_write_error(mapping, page, res);
855                 if (res == AOP_WRITEPAGE_ACTIVATE) {
856                         ClearPageReclaim(page);
857                         return PAGE_ACTIVATE;
858                 }
859
860                 if (!PageWriteback(page)) {
861                         /* synchronous write or broken a_ops? */
862                         ClearPageReclaim(page);
863                 }
864                 trace_mm_vmscan_writepage(page);
865                 inc_node_page_state(page, NR_VMSCAN_WRITE);
866                 return PAGE_SUCCESS;
867         }
868
869         return PAGE_CLEAN;
870 }
871
872 /*
873  * Same as remove_mapping, but if the page is removed from the mapping, it
874  * gets returned with a refcount of 0.
875  */
876 static int __remove_mapping(struct address_space *mapping, struct page *page,
877                             bool reclaimed, struct mem_cgroup *target_memcg)
878 {
879         unsigned long flags;
880         int refcount;
881
882         BUG_ON(!PageLocked(page));
883         BUG_ON(mapping != page_mapping(page));
884
885         xa_lock_irqsave(&mapping->i_pages, flags);
886         /*
887          * The non racy check for a busy page.
888          * The non-racy check for a busy page.
889          * Must be careful with the order of the tests. When someone has
890          * a ref to the page, it may be possible that they dirty it then
891          * drop the reference. So if PageDirty is tested before page_count
892          * here, then the following race may occur:
893          *
894          * get_user_pages(&page);
895          * [user mapping goes away]
896          * write_to(page);
897          *                              !PageDirty(page)    [good]
898          * SetPageDirty(page);
899          * put_page(page);
900          *                              !page_count(page)   [good, discard it]
901          *
902          * [oops, our write_to data is lost]
903          *
904          * Reversing the order of the tests ensures such a situation cannot
905          * escape unnoticed. The smp_rmb is needed to ensure the page->flags
906          * load is not satisfied before that of page->_refcount.
907          *
908          * Note that if SetPageDirty is always performed via set_page_dirty,
909          * and thus under the i_pages lock, then this ordering is not required.
910          */
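        /*
         * Expected references at this point: one from the isolating caller
         * plus one page cache (or swap cache) reference per base page,
         * i.e. compound_nr(page) of them for a compound page.
         */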
911         refcount = 1 + compound_nr(page);
912         if (!page_ref_freeze(page, refcount))
913                 goto cannot_free;
914         /* note: atomic_cmpxchg in page_ref_freeze provides the smp_rmb */
915         if (unlikely(PageDirty(page))) {
916                 page_ref_unfreeze(page, refcount);
917                 goto cannot_free;
918         }
919
920         if (PageSwapCache(page)) {
921                 swp_entry_t swap = { .val = page_private(page) };
922                 mem_cgroup_swapout(page, swap);
923                 __delete_from_swap_cache(page, swap);
924                 xa_unlock_irqrestore(&mapping->i_pages, flags);
925                 put_swap_page(page, swap);
926         } else {
927                 void (*freepage)(struct page *);
928                 void *shadow = NULL;
929
930                 freepage = mapping->a_ops->freepage;
931                 /*
932                  * Remember a shadow entry for reclaimed file cache in
933                  * order to detect refaults, thus thrashing, later on.
934                  *
935                  * But don't store shadows in an address space that is
936          * already exiting.  This is not just an optimization:
937                  * inode reclaim needs to empty out the radix tree or
938                  * the nodes are lost.  Don't plant shadows behind its
939                  * back.
940                  *
941                  * We also don't store shadows for DAX mappings because the
942                  * only page cache pages found in these are zero pages
943                  * covering holes, and because we don't want to mix DAX
944                  * exceptional entries and shadow exceptional entries in the
945                  * same address_space.
946                  */
947                 if (reclaimed && page_is_file_cache(page) &&
948                     !mapping_exiting(mapping) && !dax_mapping(mapping))
949                         shadow = workingset_eviction(page, target_memcg);
950                 __delete_from_page_cache(page, shadow);
951                 xa_unlock_irqrestore(&mapping->i_pages, flags);
952
953                 if (freepage != NULL)
954                         freepage(page);
955         }
956
957         return 1;
958
959 cannot_free:
960         xa_unlock_irqrestore(&mapping->i_pages, flags);
961         return 0;
962 }
963
964 /*
965  * Attempt to detach a locked page from its ->mapping.  If it is dirty or if
966  * someone else has a ref on the page, abort and return 0.  If it was
967  * successfully detached, return 1.  Assumes the caller has a single ref on
968  * this page.
969  */
970 int remove_mapping(struct address_space *mapping, struct page *page)
971 {
972         if (__remove_mapping(mapping, page, false, NULL)) {
973                 /*
974                  * Unfreezing the refcount with 1 rather than 2 effectively
975                  * drops the pagecache ref for us without requiring another
976                  * atomic operation.
977                  */
978                 page_ref_unfreeze(page, 1);
979                 return 1;
980         }
981         return 0;
982 }
983
984 /**
985  * putback_lru_page - put previously isolated page onto appropriate LRU list
986  * @page: page to be put back to appropriate lru list
987  *
988  * Add previously isolated @page to appropriate LRU list.
989  * Page may still be unevictable for other reasons.
990  *
991  * lru_lock must not be held, interrupts must be enabled.
992  */
993 void putback_lru_page(struct page *page)
994 {
995         lru_cache_add(page);
996         put_page(page);         /* drop ref from isolate */
997 }
998
999 enum page_references {
1000         PAGEREF_RECLAIM,
1001         PAGEREF_RECLAIM_CLEAN,
1002         PAGEREF_KEEP,
1003         PAGEREF_ACTIVATE,
1004 };
1005
1006 static enum page_references page_check_references(struct page *page,
1007                                                   struct scan_control *sc)
1008 {
1009         int referenced_ptes, referenced_page;
1010         unsigned long vm_flags;
1011
1012         referenced_ptes = page_referenced(page, 1, sc->target_mem_cgroup,
1013                                           &vm_flags);
1014         referenced_page = TestClearPageReferenced(page);
1015
1016         /*
1017          * Mlock lost the isolation race with us.  Let try_to_unmap()
1018          * move the page to the unevictable list.
1019          */
1020         if (vm_flags & VM_LOCKED)
1021                 return PAGEREF_RECLAIM;
1022
1023         if (referenced_ptes) {
1024                 if (PageSwapBacked(page))
1025                         return PAGEREF_ACTIVATE;
1026                 /*
1027                  * All mapped pages start out with page table
1028                  * references from the instantiating fault, so we need
1029                  * to look twice if a mapped file page is used more
1030                  * than once.
1031                  *
1032                  * Mark it and spare it for another trip around the
1033                  * inactive list.  Another page table reference will
1034                  * lead to its activation.
1035                  *
1036                  * Note: the mark is set for activated pages as well
1037                  * so that recently deactivated but used pages are
1038                  * quickly recovered.
1039                  */
1040                 SetPageReferenced(page);
1041
1042                 if (referenced_page || referenced_ptes > 1)
1043                         return PAGEREF_ACTIVATE;
1044
1045                 /*
1046                  * Activate file-backed executable pages after first usage.
1047                  */
1048                 if (vm_flags & VM_EXEC)
1049                         return PAGEREF_ACTIVATE;
1050
1051                 return PAGEREF_KEEP;
1052         }
1053
1054         /* Reclaim if clean, defer dirty pages to writeback */
1055         if (referenced_page && !PageSwapBacked(page))
1056                 return PAGEREF_RECLAIM_CLEAN;
1057
1058         return PAGEREF_RECLAIM;
1059 }
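/*
 * Summary of the decisions above (descriptive note, not from the original
 * source): VM_LOCKED pages are returned as PAGEREF_RECLAIM so try_to_unmap()
 * can move them to the unevictable list; pages with PTE references are
 * activated if they are swap-backed, executable, or used more than once,
 * and kept otherwise; unmapped pages are reclaimed, with referenced file
 * pages marked PAGEREF_RECLAIM_CLEAN so that only clean ones are reclaimed.
 */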
1060
1061 /* Check if a page is dirty or under writeback */
1062 static void page_check_dirty_writeback(struct page *page,
1063                                        bool *dirty, bool *writeback)
1064 {
1065         struct address_space *mapping;
1066
1067         /*
1068          * Anonymous pages are not handled by flushers and must be written
1069          * from reclaim context. Do not stall reclaim based on them.
1070          */
1071         if (!page_is_file_cache(page) ||
1072             (PageAnon(page) && !PageSwapBacked(page))) {
1073                 *dirty = false;
1074                 *writeback = false;
1075                 return;
1076         }
1077
1078         /* By default assume that the page flags are accurate */
1079         *dirty = PageDirty(page);
1080         *writeback = PageWriteback(page);
1081
1082         /* Verify dirty/writeback state if the filesystem supports it */
1083         if (!page_has_private(page))
1084                 return;
1085
1086         mapping = page_mapping(page);
1087         if (mapping && mapping->a_ops->is_dirty_writeback)
1088                 mapping->a_ops->is_dirty_writeback(page, dirty, writeback);
1089 }
1090
1091 /*
1092  * shrink_page_list() returns the number of reclaimed pages
1093  */
1094 static unsigned long shrink_page_list(struct list_head *page_list,
1095                                       struct pglist_data *pgdat,
1096                                       struct scan_control *sc,
1097                                       struct reclaim_stat *stat,
1098                                       bool ignore_references)
1099 {
1100         LIST_HEAD(ret_pages);
1101         LIST_HEAD(free_pages);
1102         unsigned nr_reclaimed = 0;
1103         unsigned pgactivate = 0;
1104
1105         memset(stat, 0, sizeof(*stat));
1106         cond_resched();
1107
1108         while (!list_empty(page_list)) {
1109                 struct address_space *mapping;
1110                 struct page *page;
1111                 int may_enter_fs;
1112                 enum page_references references = PAGEREF_RECLAIM;
1113                 bool dirty, writeback;
1114                 unsigned int nr_pages;
1115
1116                 cond_resched();
1117
1118                 page = lru_to_page(page_list);
1119                 list_del(&page->lru);
1120
1121                 if (!trylock_page(page))
1122                         goto keep;
1123
1124                 VM_BUG_ON_PAGE(PageActive(page), page);
1125
1126                 nr_pages = compound_nr(page);
1127
1128                 /* Account the number of base pages, even for a THP */
1129                 sc->nr_scanned += nr_pages;
1130
1131                 if (unlikely(!page_evictable(page)))
1132                         goto activate_locked;
1133
1134                 if (!sc->may_unmap && page_mapped(page))
1135                         goto keep_locked;
1136
1137                 /* page_update_gen() tried to promote this page? */
1138                 if (lru_gen_enabled() && !ignore_references &&
1139                     page_mapped(page) && PageReferenced(page))
1140                         goto keep_locked;
1141
1142                 may_enter_fs = (sc->gfp_mask & __GFP_FS) ||
1143                         (PageSwapCache(page) && (sc->gfp_mask & __GFP_IO));
1144
1145                 /*
1146                  * The number of dirty pages determines if a node is marked
1147                  * reclaim_congested which affects wait_iff_congested. kswapd
1148                  * will stall and start writing pages if the tail of the LRU
1149                  * is all dirty unqueued pages.
1150                  */
1151                 page_check_dirty_writeback(page, &dirty, &writeback);
1152                 if (dirty || writeback)
1153                         stat->nr_dirty++;
1154
1155                 if (dirty && !writeback)
1156                         stat->nr_unqueued_dirty++;
1157
1158                 /*
1159                  * Treat this page as congested if the underlying BDI is or if
1160                  * pages are cycling through the LRU so quickly that the
1161                  * pages marked for immediate reclaim are making it to the
1162                  * end of the LRU a second time.
1163                  */
1164                 mapping = page_mapping(page);
1165                 if (((dirty || writeback) && mapping &&
1166                      inode_write_congested(mapping->host)) ||
1167                     (writeback && PageReclaim(page)))
1168                         stat->nr_congested++;
1169
1170                 /*
1171                  * If a page at the tail of the LRU is under writeback, there
1172                  * are three cases to consider.
1173                  *
1174                  * 1) If reclaim is encountering an excessive number of pages
1175                  *    under writeback and this page is both under writeback and
1176                  *    PageReclaim then it indicates that pages are being queued
1177                  *    for IO but are being recycled through the LRU before the
1178                  *    IO can complete. Waiting on the page itself risks an
1179                  *    indefinite stall if it is impossible to writeback the
1180                  *    page due to IO error or disconnected storage so instead
1181                  *    note that the LRU is being scanned too quickly and the
1182                  *    caller can stall after page list has been processed.
1183                  *
1184                  * 2) Global or new memcg reclaim encounters a page that is
1185                  *    not marked for immediate reclaim, or the caller does not
1186                  *    have __GFP_FS (or __GFP_IO if it's simply going to swap,
1187                  *    not to fs). In this case mark the page for immediate
1188                  *    reclaim and continue scanning.
1189                  *
1190                  *    Require may_enter_fs because we would wait on fs, which
1191                  *    may not have submitted IO yet. And the loop driver might
1192                  *    enter reclaim, and deadlock if it waits on a page for
1193                  *    which it is needed to do the write (loop masks off
1194                  *    __GFP_IO|__GFP_FS for this reason); but more thought
1195                  *    would probably show more reasons.
1196                  *
1197                  * 3) Legacy memcg encounters a page that is already marked
1198                  *    PageReclaim. memcg does not have any dirty pages
1199                  *    throttling so we could easily OOM just because too many
1200                  *    pages are in writeback and there is nothing else to
1201                  *    reclaim. Wait for the writeback to complete.
1202                  *
1203                  * In cases 1) and 2) we activate the pages to get them out of
1204                  * the way while we continue scanning for clean pages on the
1205                  * inactive list and refilling from the active list. The
1206                  * observation here is that waiting for disk writes is more
1207                  * expensive than potentially causing reloads down the line.
1208                  * Since they're marked for immediate reclaim, they won't put
1209                  * memory pressure on the cache working set any longer than it
1210                  * takes to write them to disk.
1211                  */
1212                 if (PageWriteback(page)) {
1213                         /* Case 1 above */
1214                         if (current_is_kswapd() &&
1215                             PageReclaim(page) &&
1216                             test_bit(PGDAT_WRITEBACK, &pgdat->flags)) {
1217                                 stat->nr_immediate++;
1218                                 goto activate_locked;
1219
1220                         /* Case 2 above */
1221                         } else if (writeback_throttling_sane(sc) ||
1222                             !PageReclaim(page) || !may_enter_fs) {
1223                                 /*
1224                                  * This is slightly racy - end_page_writeback()
1225                                  * might have just cleared PageReclaim, so that
1226                                  * setting PageReclaim here ends up being interpreted
1227                                  * as PageReadahead - but that does not matter
1228                                  * enough to care.  What we do want is for this
1229                                  * page to have PageReclaim set next time memcg
1230                                  * reclaim reaches the tests above, so it will
1231                                  * then wait_on_page_writeback() to avoid OOM;
1232                                  * and it's also appropriate in global reclaim.
1233                                  */
1234                                 SetPageReclaim(page);
1235                                 stat->nr_writeback++;
1236                                 goto activate_locked;
1237
1238                         /* Case 3 above */
1239                         } else {
1240                                 unlock_page(page);
1241                                 wait_on_page_writeback(page);
1242                                 /* then go back and try same page again */
1243                                 list_add_tail(&page->lru, page_list);
1244                                 continue;
1245                         }
1246                 }
1247
1248                 if (!ignore_references)
1249                         references = page_check_references(page, sc);
1250
1251                 switch (references) {
1252                 case PAGEREF_ACTIVATE:
1253                         goto activate_locked;
1254                 case PAGEREF_KEEP:
1255                         stat->nr_ref_keep += nr_pages;
1256                         goto keep_locked;
1257                 case PAGEREF_RECLAIM:
1258                 case PAGEREF_RECLAIM_CLEAN:
1259                         ; /* try to reclaim the page below */
1260                 }
1261
1262                 /*
1263                  * Anonymous process memory has backing store?
1264                  * Try to allocate it some swap space here.
1265                  * A lazyfree page can be freed directly.
1266                  */
1267                 if (PageAnon(page) && PageSwapBacked(page)) {
1268                         if (!PageSwapCache(page)) {
1269                                 if (!(sc->gfp_mask & __GFP_IO))
1270                                         goto keep_locked;
1271                                 if (PageTransHuge(page)) {
1272                                         /* cannot split THP, skip it */
1273                                         if (!can_split_huge_page(page, NULL))
1274                                                 goto activate_locked;
1275                                         /*
1276                                          * Split pages without a PMD map right
1277                                          * away. Chances are some or all of the
1278                                          * tail pages can be freed without IO.
1279                                          */
1280                                         if (!compound_mapcount(page) &&
1281                                             split_huge_page_to_list(page,
1282                                                                     page_list))
1283                                                 goto activate_locked;
1284                                 }
1285                                 if (!add_to_swap(page)) {
1286                                         if (!PageTransHuge(page))
1287                                                 goto activate_locked_split;
1288                                         /* Fallback to swap normal pages */
1289                                         if (split_huge_page_to_list(page,
1290                                                                     page_list))
1291                                                 goto activate_locked;
1292 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
1293                                         count_vm_event(THP_SWPOUT_FALLBACK);
1294 #endif
1295                                         if (!add_to_swap(page))
1296                                                 goto activate_locked_split;
1297                                 }
1298
1299                                 may_enter_fs = 1;
1300
1301                                 /* Adding to swap updated mapping */
1302                                 mapping = page_mapping(page);
1303                         }
1304                 } else if (unlikely(PageTransHuge(page))) {
1305                         /* Split file THP */
1306                         if (split_huge_page_to_list(page, page_list))
1307                                 goto keep_locked;
1308                 }
1309
1310                 /*
1311                  * The THP may have been split above; if so, we need to subtract the
1312                  * tail pages and update nr_pages to avoid accounting them twice.
1313                  *
1314                  * The tail pages that are added into swap cache successfully
1315                  * reach here.
1316                  */
1317                 if ((nr_pages > 1) && !PageTransHuge(page)) {
1318                         sc->nr_scanned -= (nr_pages - 1);
1319                         nr_pages = 1;
1320                 }
1321
1322                 /*
1323                  * The page is mapped into the page tables of one or more
1324                  * processes. Try to unmap it here.
1325                  */
1326                 if (page_mapped(page)) {
1327                         enum ttu_flags flags = TTU_BATCH_FLUSH;
1328
1329                         if (unlikely(PageTransHuge(page)))
1330                                 flags |= TTU_SPLIT_HUGE_PMD;
1331                         if (!try_to_unmap(page, flags)) {
1332                                 stat->nr_unmap_fail += nr_pages;
1333                                 goto activate_locked;
1334                         }
1335                 }
1336
1337                 if (PageDirty(page)) {
1338                         /*
1339                          * Only kswapd can writeback filesystem pages
1340                          * to avoid risk of stack overflow. But avoid
1341                          * injecting inefficient single-page IO into
1342                          * flusher writeback as much as possible: only
1343                          * write pages when we've encountered many
1344                          * dirty pages, and when we've already scanned
1345                          * the rest of the LRU for clean pages and see
1346                          * the same dirty pages again (PageReclaim).
1347                          */
1348                         if (page_is_file_cache(page) &&
1349                             (!current_is_kswapd() || !PageReclaim(page) ||
1350                              !test_bit(PGDAT_DIRTY, &pgdat->flags))) {
1351                                 /*
1352                                  * Immediately reclaim when written back.
1353                                  * Similar in principle to deactivate_page()
1354                                  * except we already have the page isolated
1355                                  * and know it's dirty
1356                                  */
1357                                 inc_node_page_state(page, NR_VMSCAN_IMMEDIATE);
1358                                 SetPageReclaim(page);
1359
1360                                 goto activate_locked;
1361                         }
1362
1363                         if (references == PAGEREF_RECLAIM_CLEAN)
1364                                 goto keep_locked;
1365                         if (!may_enter_fs)
1366                                 goto keep_locked;
1367                         if (!sc->may_writepage)
1368                                 goto keep_locked;
1369
1370                         /*
1371                          * Page is dirty. Flush the TLB if a writable entry
1372                          * potentially exists to avoid CPU writes after IO
1373                          * starts and then write it out here.
1374                          */
1375                         try_to_unmap_flush_dirty();
1376                         switch (pageout(page, mapping, sc)) {
1377                         case PAGE_KEEP:
1378                                 goto keep_locked;
1379                         case PAGE_ACTIVATE:
1380                                 goto activate_locked;
1381                         case PAGE_SUCCESS:
1382                                 if (PageWriteback(page))
1383                                         goto keep;
1384                                 if (PageDirty(page))
1385                                         goto keep;
1386
1387                                 /*
1388                                  * A synchronous write - probably a ramdisk.  Go
1389                                  * ahead and try to reclaim the page.
1390                                  */
1391                                 if (!trylock_page(page))
1392                                         goto keep;
1393                                 if (PageDirty(page) || PageWriteback(page))
1394                                         goto keep_locked;
1395                                 mapping = page_mapping(page);
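                                /* fall through */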
1396                         case PAGE_CLEAN:
1397                                 ; /* try to free the page below */
1398                         }
1399                 }
1400
1401                 /*
1402                  * If the page has buffers, try to free the buffer mappings
1403                  * associated with this page. If we succeed we try to free
1404                  * the page as well.
1405                  *
1406                  * We do this even if the page is PageDirty().
1407                  * try_to_release_page() does not perform I/O, but it is
1408                  * possible for a page to have PageDirty set while it is actually
1409                  * clean (all its buffers are clean).  This happens if the
1410                  * buffers were written out directly, with submit_bh(). ext3
1411                  * will do this, as well as the blockdev mapping.
1412                  * try_to_release_page() will discover that cleanness and will
1413                  * drop the buffers and mark the page clean - it can be freed.
1414                  *
1415                  * Rarely, pages can have buffers and no ->mapping.  These are
1416                  * the pages which were not successfully invalidated in
1417                  * truncate_complete_page().  We try to drop those buffers here
1418                  * and if that worked, and the page is no longer mapped into
1419                  * process address space (page_count == 1) it can be freed.
1420                  * Otherwise, leave the page on the LRU so it is swappable.
1421                  */
1422                 if (page_has_private(page)) {
1423                         if (!try_to_release_page(page, sc->gfp_mask))
1424                                 goto activate_locked;
1425                         if (!mapping && page_count(page) == 1) {
1426                                 unlock_page(page);
1427                                 if (put_page_testzero(page))
1428                                         goto free_it;
1429                                 else {
1430                                         /*
1431                                          * rare race with speculative reference.
1432                                          * the speculative reference will free
1433                                          * this page shortly, so we may
1434                                          * increment nr_reclaimed here (and
1435                                          * leave it off the LRU).
1436                                          */
1437                                         nr_reclaimed++;
1438                                         continue;
1439                                 }
1440                         }
1441                 }
1442
1443                 if (PageAnon(page) && !PageSwapBacked(page)) {
1444                         /* follow __remove_mapping for reference */
1445                         if (!page_ref_freeze(page, 1))
1446                                 goto keep_locked;
1447                         if (PageDirty(page)) {
1448                                 page_ref_unfreeze(page, 1);
1449                                 goto keep_locked;
1450                         }
1451
1452                         count_vm_event(PGLAZYFREED);
1453                         count_memcg_page_event(page, PGLAZYFREED);
1454                 } else if (!mapping || !__remove_mapping(mapping, page, true,
1455                                                          sc->target_mem_cgroup))
1456                         goto keep_locked;
1457
1458                 unlock_page(page);
1459 free_it:
1460                 /*
1461                  * A THP may be swapped out as a whole, so account
1462                  * all of its base pages.
1463                  */
1464                 nr_reclaimed += nr_pages;
1465
1466                 /*
1467                  * Is there a need to periodically free the page list? It
1468                  * would appear not, as the counts should be low.
1469                  */
1470                 if (unlikely(PageTransHuge(page)))
1471                         (*get_compound_page_dtor(page))(page);
1472                 else
1473                         list_add(&page->lru, &free_pages);
1474                 continue;
1475
1476 activate_locked_split:
1477                 /*
1478                  * Tail pages that failed to be added to the swap cache
1479                  * reach here.  Fix up nr_scanned and nr_pages.
1480                  */
1481                 if (nr_pages > 1) {
1482                         sc->nr_scanned -= (nr_pages - 1);
1483                         nr_pages = 1;
1484                 }
1485 activate_locked:
1486                 /* Not a candidate for swapping, so reclaim swap space. */
1487                 if (PageSwapCache(page) && (mem_cgroup_swap_full(page) ||
1488                                                 PageMlocked(page)))
1489                         try_to_free_swap(page);
1490                 VM_BUG_ON_PAGE(PageActive(page), page);
1491                 if (!PageMlocked(page)) {
1492                         int type = page_is_file_cache(page);
1493                         SetPageActive(page);
1494                         stat->nr_activate[type] += nr_pages;
1495                         count_memcg_page_event(page, PGACTIVATE);
1496                 }
1497 keep_locked:
1498                 unlock_page(page);
1499 keep:
1500                 list_add(&page->lru, &ret_pages);
1501                 VM_BUG_ON_PAGE(PageLRU(page) || PageUnevictable(page), page);
1502         }
1503
1504         pgactivate = stat->nr_activate[0] + stat->nr_activate[1];
1505
1506         mem_cgroup_uncharge_list(&free_pages);
1507         try_to_unmap_flush();
1508         free_unref_page_list(&free_pages);
1509
1510         list_splice(&ret_pages, page_list);
1511         count_vm_events(PGACTIVATE, pgactivate);
1512
1513         return nr_reclaimed;
1514 }
1515
1516 unsigned long reclaim_clean_pages_from_list(struct zone *zone,
1517                                             struct list_head *page_list)
1518 {
1519         struct scan_control sc = {
1520                 .gfp_mask = GFP_KERNEL,
1521                 .priority = DEF_PRIORITY,
1522                 .may_unmap = 1,
1523         };
1524         struct reclaim_stat dummy_stat;
1525         unsigned long ret;
1526         struct page *page, *next;
1527         LIST_HEAD(clean_pages);
1528
1529         list_for_each_entry_safe(page, next, page_list, lru) {
1530                 if (page_is_file_cache(page) && !PageDirty(page) &&
1531                     !__PageMovable(page) && !PageUnevictable(page)) {
1532                         ClearPageActive(page);
1533                         list_move(&page->lru, &clean_pages);
1534                 }
1535         }
1536
1537         ret = shrink_page_list(&clean_pages, zone->zone_pgdat, &sc,
1538                                 &dummy_stat, true);
1539         list_splice(&clean_pages, page_list);
1540         mod_node_page_state(zone->zone_pgdat, NR_ISOLATED_FILE, -ret);
1541         return ret;
1542 }
1543
1544 /*
1545  * Attempt to remove the specified page from its LRU.  Only take this page
1546  * if it is of the appropriate PageActive status.  Pages which are being
1547  * freed elsewhere are also ignored.
1548  *
1549  * page:        page to consider
1550  * mode:        one of the LRU isolation modes defined above
1551  *
1552  * returns 0 on success, -ve errno on failure.
1553  */
1554 int __isolate_lru_page(struct page *page, isolate_mode_t mode)
1555 {
1556         int ret = -EBUSY;
1557
1558         /* Only take pages on the LRU. */
1559         if (!PageLRU(page))
1560                 return ret;
1561
1562         /* Compaction should not handle unevictable pages but CMA can do so */
1563         if (PageUnevictable(page) && !(mode & ISOLATE_UNEVICTABLE))
1564                 return ret;
1565
1566         /*
1567          * To minimise LRU disruption, the caller can indicate that it only
1568          * wants to isolate pages it will be able to operate on without
1569          * blocking - clean pages for the most part.
1570          *
1571          * ISOLATE_ASYNC_MIGRATE is used to indicate that the caller only wants
1572          * pages that can be migrated without blocking.
1573          */
1574         if (mode & ISOLATE_ASYNC_MIGRATE) {
1575                 /* All the caller can do on PageWriteback is block */
1576                 if (PageWriteback(page))
1577                         return ret;
1578
1579                 if (PageDirty(page)) {
1580                         struct address_space *mapping;
1581                         bool migrate_dirty;
1582
1583                         /*
1584                          * Only pages without mappings or that have a
1585                          * ->migratepage callback are possible to migrate
1586                          * without blocking. However, we can be racing with
1587                          * truncation so it's necessary to lock the page
1588                          * to stabilise the mapping as truncation holds
1589                          * the page lock until after the page is removed
1590                          * from the page cache.
1591                          */
1592                         if (!trylock_page(page))
1593                                 return ret;
1594
1595                         mapping = page_mapping(page);
1596                         migrate_dirty = !mapping || mapping->a_ops->migratepage;
1597                         unlock_page(page);
1598                         if (!migrate_dirty)
1599                                 return ret;
1600                 }
1601         }
1602
1603         if ((mode & ISOLATE_UNMAPPED) && page_mapped(page))
1604                 return ret;
1605
1606         if (likely(get_page_unless_zero(page))) {
1607                 /*
1608                  * Be careful not to clear PageLRU until after we're
1609                  * sure the page is not being freed elsewhere -- the
1610                  * page release code relies on it.
1611                  */
1612                 if (TestClearPageLRU(page))
1613                         ret = 0;
1614                 else
1615                         put_page(page);
1616         }
1617
1618         return ret;
1619 }
1620
1621
1622 /*
1623  * Update LRU sizes after isolating pages. The LRU size updates must
1624  * be complete before mem_cgroup_update_lru_size due to a sanity check.
1625  */
1626 static __always_inline void update_lru_sizes(struct lruvec *lruvec,
1627                         enum lru_list lru, unsigned long *nr_zone_taken)
1628 {
1629         int zid;
1630
1631         for (zid = 0; zid < MAX_NR_ZONES; zid++) {
1632                 if (!nr_zone_taken[zid])
1633                         continue;
1634
1635                 __update_lru_size(lruvec, lru, zid, -nr_zone_taken[zid]);
1636 #ifdef CONFIG_MEMCG
1637                 mem_cgroup_update_lru_size(lruvec, lru, zid, -nr_zone_taken[zid]);
1638 #endif
1639         }
1640
1641 }
1642
1643 /**
1644  * pgdat->lru_lock is heavily contended.  Some of the functions that
1645  * shrink the lists perform better by taking out a batch of pages
1646  * and working on them outside the LRU lock.
1647  *
1648  * For pagecache intensive workloads, this function is the hottest
1649  * spot in the kernel (apart from copy_*_user functions).
1650  *
1651  * Appropriate locks must be held before calling this function.
1652  *
1653  * @nr_to_scan: The number of eligible pages to look through on the list.
1654  * @lruvec:     The LRU vector to pull pages from.
1655  * @dst:        The temp list to put pages on to.
1656  * @nr_scanned: The number of pages that were scanned.
1657  * @sc:         The scan_control struct for this reclaim session
1658  * @mode:       One of the LRU isolation modes
1659  * @lru:        LRU list id for isolating
1660  *
1661  * returns how many pages were moved onto *@dst.
1662  */
1663 static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
1664                 struct lruvec *lruvec, struct list_head *dst,
1665                 unsigned long *nr_scanned, struct scan_control *sc,
1666                 enum lru_list lru)
1667 {
1668         struct list_head *src = &lruvec->lists[lru];
1669         unsigned long nr_taken = 0;
1670         unsigned long nr_zone_taken[MAX_NR_ZONES] = { 0 };
1671         unsigned long nr_skipped[MAX_NR_ZONES] = { 0, };
1672         unsigned long skipped = 0;
1673         unsigned long scan, total_scan, nr_pages;
1674         LIST_HEAD(pages_skipped);
1675         isolate_mode_t mode = (sc->may_unmap ? 0 : ISOLATE_UNMAPPED);
1676
1677         total_scan = 0;
1678         scan = 0;
1679         while (scan < nr_to_scan && !list_empty(src)) {
1680                 struct page *page;
1681
1682                 page = lru_to_page(src);
1683                 prefetchw_prev_lru_page(page, src, flags);
1684
1685                 nr_pages = compound_nr(page);
1686                 total_scan += nr_pages;
1687
1688                 if (page_zonenum(page) > sc->reclaim_idx) {
1689                         list_move(&page->lru, &pages_skipped);
1690                         nr_skipped[page_zonenum(page)] += nr_pages;
1691                         continue;
1692                 }
1693
1694                 /*
1695                  * Do not count skipped pages because that makes the function
1696                  * return with no isolated pages if the LRU mostly contains
1697                  * ineligible pages.  This causes the VM to not reclaim any
1698                  * pages, triggering a premature OOM.
1699                  *
1700                  * Account all tail pages of THP.  This would not cause
1701                  * premature OOM since __isolate_lru_page() returns -EBUSY
1702                  * only when the page is being freed somewhere else.
1703                  */
1704                 scan += nr_pages;
1705                 switch (__isolate_lru_page(page, mode)) {
1706                 case 0:
1707                         nr_taken += nr_pages;
1708                         nr_zone_taken[page_zonenum(page)] += nr_pages;
1709                         list_move(&page->lru, dst);
1710                         break;
1711
1712                 case -EBUSY:
1713                         /* else it is being freed elsewhere */
1714                         list_move(&page->lru, src);
1715                         continue;
1716
1717                 default:
1718                         BUG();
1719                 }
1720         }
1721
1722         /*
1723          * Splice any skipped pages to the start of the LRU list. Note that
1724          * this disrupts the LRU order when reclaiming for lower zones but
1725          * we cannot splice to the tail. If we did then the SWAP_CLUSTER_MAX
1726          * scanning would soon rescan the same pages to skip and put the
1727          * system at risk of premature OOM.
1728          */
1729         if (!list_empty(&pages_skipped)) {
1730                 int zid;
1731
1732                 list_splice(&pages_skipped, src);
1733                 for (zid = 0; zid < MAX_NR_ZONES; zid++) {
1734                         if (!nr_skipped[zid])
1735                                 continue;
1736
1737                         __count_zid_vm_events(PGSCAN_SKIP, zid, nr_skipped[zid]);
1738                         skipped += nr_skipped[zid];
1739                 }
1740         }
1741         *nr_scanned = total_scan;
1742         trace_mm_vmscan_lru_isolate(sc->reclaim_idx, sc->order, nr_to_scan,
1743                                     total_scan, skipped, nr_taken, mode, lru);
1744         update_lru_sizes(lruvec, lru, nr_zone_taken);
1745         return nr_taken;
1746 }
1747
1748 /**
1749  * isolate_lru_page - tries to isolate a page from its LRU list
1750  * @page: page to isolate from its LRU list
1751  *
1752  * Isolates a @page from an LRU list, clears PageLRU and adjusts the
1753  * vmstat statistic corresponding to whatever LRU list the page was on.
1754  *
1755  * Returns 0 if the page was removed from an LRU list.
1756  * Returns -EBUSY if the page was not on an LRU list.
1757  *
1758  * The returned page will have PageLRU() cleared.  If it was found on
1759  * the active list, it will have PageActive set.  If it was found on
1760  * the unevictable list, it will have the PageUnevictable bit set. That flag
1761  * may need to be cleared by the caller before letting the page go.
1762  *
1763  * The vmstat statistic corresponding to the list on which the page was
1764  * found will be decremented.
1765  *
1766  * Restrictions:
1767  *
1768  * (1) Must be called with an elevated refcount on the page. This is a
1769  *     fundamental difference from isolate_lru_pages (which is called
1770  *     without a stable reference).
1771  * (2) the lru_lock must not be held.
1772  * (3) interrupts must be enabled.
1773  */
1774 int isolate_lru_page(struct page *page)
1775 {
1776         int ret = -EBUSY;
1777
1778         VM_BUG_ON_PAGE(!page_count(page), page);
1779         WARN_RATELIMIT(PageTail(page), "trying to isolate tail page");
1780
1781         if (TestClearPageLRU(page)) {
1782                 pg_data_t *pgdat = page_pgdat(page);
1783                 struct lruvec *lruvec;
1784
1785                 get_page(page);
1786                 lruvec = mem_cgroup_page_lruvec(page, pgdat);
1787                 spin_lock_irq(&pgdat->lru_lock);
1788                 del_page_from_lru_list(page, lruvec, page_lru(page));
1789                 spin_unlock_irq(&pgdat->lru_lock);
1790                 ret = 0;
1791         }
1792
1793         return ret;
1794 }
1795
1796 /*
1797  * A direct reclaimer may isolate SWAP_CLUSTER_MAX pages from the LRU list and
1798  * then get rescheduled. When there is a massive number of tasks doing page
1799  * allocation, such sleeping direct reclaimers may keep piling up on each CPU,
1800  * the LRU list will shrink and be scanned faster than necessary, leading to
1801  * unnecessary swapping, thrashing and OOM.
1802  */
1803 static int too_many_isolated(struct pglist_data *pgdat, int file,
1804                 struct scan_control *sc)
1805 {
1806         unsigned long inactive, isolated;
1807
1808         if (current_is_kswapd())
1809                 return 0;
1810
1811         if (!writeback_throttling_sane(sc))
1812                 return 0;
1813
1814         if (file) {
1815                 inactive = node_page_state(pgdat, NR_INACTIVE_FILE);
1816                 isolated = node_page_state(pgdat, NR_ISOLATED_FILE);
1817         } else {
1818                 inactive = node_page_state(pgdat, NR_INACTIVE_ANON);
1819                 isolated = node_page_state(pgdat, NR_ISOLATED_ANON);
1820         }
1821
1822         /*
1823          * GFP_NOIO/GFP_NOFS callers are allowed to isolate more pages, so they
1824          * won't get blocked by normal direct-reclaimers, forming a circular
1825          * deadlock.
1826          */
1827         if ((sc->gfp_mask & (__GFP_IO | __GFP_FS)) == (__GFP_IO | __GFP_FS))
1828                 inactive >>= 3;
1829
1830         return isolated > inactive;
1831 }
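
/*
 * A rough worked example of the check above (hypothetical numbers): with
 * 40000 inactive file pages and 3000 isolated file pages, a GFP_KERNEL
 * direct reclaimer has both __GFP_IO and __GFP_FS set, so it compares
 * 3000 against 40000 >> 3 = 5000 and is not throttled; once more than
 * 5000 pages are isolated it will msleep() in shrink_inactive_list().
 * A GFP_NOFS caller skips the shift and only throttles once isolated
 * pages exceed the full 40000, which is what lets it make progress past
 * blocked normal reclaimers.
 */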
1832
1833 /*
1834  * This moves pages from @list to corresponding LRU list.
1835  *
1836  * We move them the other way if the page is referenced by one or more
1837  * processes, from rmap.
1838  *
1839  * If the pages are mostly unmapped, the processing is fast and it is
1840  * appropriate to hold pgdat->lru_lock across the whole operation.  But if
1841  * the pages are mapped, the processing is slow (page_referenced()) so we
1842  * should drop pgdat->lru_lock around each page.  It's impossible to balance
1843  * this, so instead we remove the pages from the LRU while processing them.
1844  * It is safe to rely on PG_active against the non-LRU pages in here because
1845  * nobody will play with that bit on a non-LRU page.
1846  *
1847  * The downside is that we have to touch page->_refcount against each page.
1848  * But we had to alter page->flags anyway.
1849  *
1850  * Returns the number of pages moved to the given lruvec.
1851  */
1852
1853 static unsigned noinline_for_stack move_pages_to_lru(struct lruvec *lruvec,
1854                                                      struct list_head *list)
1855 {
1856         struct pglist_data *pgdat = lruvec_pgdat(lruvec);
1857         int nr_pages, nr_moved = 0;
1858         LIST_HEAD(pages_to_free);
1859         struct page *page;
1860         enum lru_list lru;
1861
1862         while (!list_empty(list)) {
1863                 page = lru_to_page(list);
1864                 VM_BUG_ON_PAGE(PageLRU(page), page);
1865                 if (unlikely(!page_evictable(page))) {
1866                         list_del(&page->lru);
1867                         spin_unlock_irq(&pgdat->lru_lock);
1868                         putback_lru_page(page);
1869                         spin_lock_irq(&pgdat->lru_lock);
1870                         continue;
1871                 }
1872                 lruvec = mem_cgroup_page_lruvec(page, pgdat);
1873
1874                 SetPageLRU(page);
1875                 lru = page_lru(page);
1876
1877                 nr_pages = hpage_nr_pages(page);
1878                 update_lru_size(lruvec, lru, page_zonenum(page), nr_pages);
1879                 list_move(&page->lru, &lruvec->lists[lru]);
1880
1881                 if (put_page_testzero(page)) {
1882                         __ClearPageLRU(page);
1883                         __ClearPageActive(page);
1884                         del_page_from_lru_list(page, lruvec, lru);
1885
1886                         if (unlikely(PageCompound(page))) {
1887                                 spin_unlock_irq(&pgdat->lru_lock);
1888                                 (*get_compound_page_dtor(page))(page);
1889                                 spin_lock_irq(&pgdat->lru_lock);
1890                         } else
1891                                 list_add(&page->lru, &pages_to_free);
1892                 } else {
1893                         nr_moved += nr_pages;
1894                 }
1895         }
1896
1897         /*
1898          * To save our caller's stack, reuse the input list for the pages to free.
1899          */
1900         list_splice(&pages_to_free, list);
1901
1902         return nr_moved;
1903 }
1904
1905 /*
1906  * If a kernel thread (such as nfsd for loop-back mounts) services
1907  * a backing device by writing to the page cache, it sets PF_LESS_THROTTLE.
1908  * In that case we should only throttle if the backing device it is
1909  * writing to is congested.  In other cases it is safe to throttle.
1910  */
1911 static int current_may_throttle(void)
1912 {
1913         return !(current->flags & PF_LESS_THROTTLE) ||
1914                 current->backing_dev_info == NULL ||
1915                 bdi_write_congested(current->backing_dev_info);
1916 }
1917
1918 /*
1919  * shrink_inactive_list() is a helper for shrink_node().  It returns the number
1920  * of reclaimed pages
1921  */
1922 static noinline_for_stack unsigned long
1923 shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
1924                      struct scan_control *sc, enum lru_list lru)
1925 {
1926         LIST_HEAD(page_list);
1927         unsigned long nr_scanned;
1928         unsigned long nr_reclaimed = 0;
1929         unsigned long nr_taken;
1930         struct reclaim_stat stat;
1931         int file = is_file_lru(lru);
1932         enum vm_event_item item;
1933         struct pglist_data *pgdat = lruvec_pgdat(lruvec);
1934         struct zone_reclaim_stat *reclaim_stat = &lruvec->reclaim_stat;
1935         bool stalled = false;
1936
1937         while (unlikely(too_many_isolated(pgdat, file, sc))) {
1938                 if (stalled)
1939                         return 0;
1940
1941                 /* wait a bit for the reclaimer. */
1942                 msleep(100);
1943                 stalled = true;
1944
1945                 /* We are about to die and free our memory. Return now. */
1946                 if (fatal_signal_pending(current))
1947                         return SWAP_CLUSTER_MAX;
1948         }
1949
1950         lru_add_drain();
1951
1952         spin_lock_irq(&pgdat->lru_lock);
1953
1954         nr_taken = isolate_lru_pages(nr_to_scan, lruvec, &page_list,
1955                                      &nr_scanned, sc, lru);
1956
1957         __mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, nr_taken);
1958         reclaim_stat->recent_scanned[file] += nr_taken;
1959
1960         item = current_is_kswapd() ? PGSCAN_KSWAPD : PGSCAN_DIRECT;
1961         if (!cgroup_reclaim(sc))
1962                 __count_vm_events(item, nr_scanned);
1963         __count_memcg_events(lruvec_memcg(lruvec), item, nr_scanned);
1964         spin_unlock_irq(&pgdat->lru_lock);
1965
1966         if (nr_taken == 0)
1967                 return 0;
1968
1969         nr_reclaimed = shrink_page_list(&page_list, pgdat, sc, &stat, false);
1970
1971         spin_lock_irq(&pgdat->lru_lock);
1972
1973         item = current_is_kswapd() ? PGSTEAL_KSWAPD : PGSTEAL_DIRECT;
1974         if (!cgroup_reclaim(sc))
1975                 __count_vm_events(item, nr_reclaimed);
1976         __count_memcg_events(lruvec_memcg(lruvec), item, nr_reclaimed);
1977         reclaim_stat->recent_rotated[0] += stat.nr_activate[0];
1978         reclaim_stat->recent_rotated[1] += stat.nr_activate[1];
1979
1980         move_pages_to_lru(lruvec, &page_list);
1981
1982         __mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, -nr_taken);
1983
1984         spin_unlock_irq(&pgdat->lru_lock);
1985
1986         mem_cgroup_uncharge_list(&page_list);
1987         free_unref_page_list(&page_list);
1988
1989         /*
1990          * If dirty pages are scanned that are not queued for IO, it
1991          * implies that flushers are not doing their job. This can
1992          * happen when memory pressure pushes dirty pages to the end of
1993          * the LRU before the dirty limits are breached and the dirty
1994          * data has expired. It can also happen when the proportion of
1995          * dirty pages grows not through writes but through memory
1996          * pressure reclaiming all the clean cache. And in some cases,
1997          * the flushers simply cannot keep up with the allocation
1998          * rate. Nudge the flusher threads in case they are asleep.
1999          */
2000         if (stat.nr_unqueued_dirty == nr_taken)
2001                 wakeup_flusher_threads(WB_REASON_VMSCAN);
2002
2003         sc->nr.dirty += stat.nr_dirty;
2004         sc->nr.congested += stat.nr_congested;
2005         sc->nr.unqueued_dirty += stat.nr_unqueued_dirty;
2006         sc->nr.writeback += stat.nr_writeback;
2007         sc->nr.immediate += stat.nr_immediate;
2008         sc->nr.taken += nr_taken;
2009         if (file)
2010                 sc->nr.file_taken += nr_taken;
2011
2012         trace_mm_vmscan_lru_shrink_inactive(pgdat->node_id,
2013                         nr_scanned, nr_reclaimed, &stat, sc->priority, file);
2014         return nr_reclaimed;
2015 }
2016
2017 static void shrink_active_list(unsigned long nr_to_scan,
2018                                struct lruvec *lruvec,
2019                                struct scan_control *sc,
2020                                enum lru_list lru)
2021 {
2022         unsigned long nr_taken;
2023         unsigned long nr_scanned;
2024         unsigned long vm_flags;
2025         LIST_HEAD(l_hold);      /* The pages which were snipped off */
2026         LIST_HEAD(l_active);
2027         LIST_HEAD(l_inactive);
2028         struct page *page;
2029         struct zone_reclaim_stat *reclaim_stat = &lruvec->reclaim_stat;
2030         unsigned nr_deactivate, nr_activate;
2031         unsigned nr_rotated = 0;
2032         int file = is_file_lru(lru);
2033         struct pglist_data *pgdat = lruvec_pgdat(lruvec);
2034
2035         lru_add_drain();
2036
2037         spin_lock_irq(&pgdat->lru_lock);
2038
2039         nr_taken = isolate_lru_pages(nr_to_scan, lruvec, &l_hold,
2040                                      &nr_scanned, sc, lru);
2041
2042         __mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, nr_taken);
2043         reclaim_stat->recent_scanned[file] += nr_taken;
2044
2045         __count_vm_events(PGREFILL, nr_scanned);
2046         __count_memcg_events(lruvec_memcg(lruvec), PGREFILL, nr_scanned);
2047
2048         spin_unlock_irq(&pgdat->lru_lock);
2049
2050         while (!list_empty(&l_hold)) {
2051                 cond_resched();
2052                 page = lru_to_page(&l_hold);
2053                 list_del(&page->lru);
2054
2055                 if (unlikely(!page_evictable(page))) {
2056                         putback_lru_page(page);
2057                         continue;
2058                 }
2059
2060                 if (unlikely(buffer_heads_over_limit)) {
2061                         if (page_has_private(page) && trylock_page(page)) {
2062                                 if (page_has_private(page))
2063                                         try_to_release_page(page, 0);
2064                                 unlock_page(page);
2065                         }
2066                 }
2067
2068                 if (page_referenced(page, 0, sc->target_mem_cgroup,
2069                                     &vm_flags)) {
2070                         nr_rotated += hpage_nr_pages(page);
2071                         /*
2072                          * Identify referenced, file-backed active pages and
2073                          * give them one more trip around the active list, so
2074                          * that executable code gets a better chance to stay in
2075                          * memory under moderate memory pressure.  Anon pages
2076                          * are not likely to be evicted by use-once streaming
2077                          * IO, plus JVM can create lots of anon VM_EXEC pages,
2078                          * so we ignore them here.
2079                          */
2080                         if ((vm_flags & VM_EXEC) && page_is_file_cache(page)) {
2081                                 list_add(&page->lru, &l_active);
2082                                 continue;
2083                         }
2084                 }
2085
2086                 ClearPageActive(page);  /* we are de-activating */
2087                 SetPageWorkingset(page);
2088                 list_add(&page->lru, &l_inactive);
2089         }
2090
2091         /*
2092          * Move pages back to the lru list.
2093          */
2094         spin_lock_irq(&pgdat->lru_lock);
2095         /*
2096          * Count referenced pages from currently used mappings as rotated,
2097          * even though only some of them are actually re-activated.  This
2098          * helps balance scan pressure between file and anonymous pages in
2099          * get_scan_count.
2100          */
2101         reclaim_stat->recent_rotated[file] += nr_rotated;
2102
2103         nr_activate = move_pages_to_lru(lruvec, &l_active);
2104         nr_deactivate = move_pages_to_lru(lruvec, &l_inactive);
2105         /* Keep all free pages in l_active list */
2106         list_splice(&l_inactive, &l_active);
2107
2108         __count_vm_events(PGDEACTIVATE, nr_deactivate);
2109         __count_memcg_events(lruvec_memcg(lruvec), PGDEACTIVATE, nr_deactivate);
2110
2111         __mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, -nr_taken);
2112         spin_unlock_irq(&pgdat->lru_lock);
2113
2114         mem_cgroup_uncharge_list(&l_active);
2115         free_unref_page_list(&l_active);
2116         trace_mm_vmscan_lru_shrink_active(pgdat->node_id, nr_taken, nr_activate,
2117                         nr_deactivate, nr_rotated, sc->priority, file);
2118 }
2119
2120 unsigned long reclaim_pages(struct list_head *page_list)
2121 {
2122         int nid = -1;
2123         unsigned long nr_reclaimed = 0;
2124         LIST_HEAD(node_page_list);
2125         struct reclaim_stat dummy_stat;
2126         struct page *page;
2127         struct scan_control sc = {
2128                 .gfp_mask = GFP_KERNEL,
2129                 .priority = DEF_PRIORITY,
2130                 .may_writepage = 1,
2131                 .may_unmap = 1,
2132                 .may_swap = 1,
2133         };
2134
2135         while (!list_empty(page_list)) {
2136                 page = lru_to_page(page_list);
2137                 if (nid == -1) {
2138                         nid = page_to_nid(page);
2139                         INIT_LIST_HEAD(&node_page_list);
2140                 }
2141
2142                 if (nid == page_to_nid(page)) {
2143                         ClearPageActive(page);
2144                         list_move(&page->lru, &node_page_list);
2145                         continue;
2146                 }
2147
2148                 nr_reclaimed += shrink_page_list(&node_page_list,
2149                                                 NODE_DATA(nid),
2150                                                 &sc, &dummy_stat, false);
2151                 while (!list_empty(&node_page_list)) {
2152                         page = lru_to_page(&node_page_list);
2153                         list_del(&page->lru);
2154                         putback_lru_page(page);
2155                 }
2156
2157                 nid = -1;
2158         }
2159
2160         if (!list_empty(&node_page_list)) {
2161                 nr_reclaimed += shrink_page_list(&node_page_list,
2162                                                 NODE_DATA(nid),
2163                                                 &sc, &dummy_stat, false);
2164                 while (!list_empty(&node_page_list)) {
2165                         page = lru_to_page(&node_page_list);
2166                         list_del(&page->lru);
2167                         putback_lru_page(page);
2168                 }
2169         }
2170
2171         return nr_reclaimed;
2172 }
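
/*
 * Minimal usage sketch (illustrative only, not part of the original file):
 * a caller such as the MADV_PAGEOUT path is expected to hand reclaim_pages()
 * a private list of pages that have already been taken off their LRU lists,
 * e.g. with isolate_lru_page(). The helper name below is hypothetical and
 * the block is guarded out.
 */
#if 0
static unsigned long example_reclaim_isolated(struct list_head *isolated)
{
	/* reclaim_pages() splits the list per node and puts back what it cannot free */
	return reclaim_pages(isolated);
}
#endif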
2173
2174 static unsigned long shrink_list(enum lru_list lru, unsigned long nr_to_scan,
2175                                  struct lruvec *lruvec, struct scan_control *sc)
2176 {
2177         if (is_active_lru(lru)) {
2178                 if (sc->may_deactivate & (1 << is_file_lru(lru)))
2179                         shrink_active_list(nr_to_scan, lruvec, sc, lru);
2180                 else
2181                         sc->skipped_deactivate = 1;
2182                 return 0;
2183         }
2184
2185         return shrink_inactive_list(nr_to_scan, lruvec, sc, lru);
2186 }
2187
2188 /*
2189  * The inactive anon list should be small enough that the VM never has
2190  * to do too much work.
2191  *
2192  * The inactive file list should be small enough to leave most memory
2193  * to the established workingset on the scan-resistant active list,
2194  * but large enough to avoid thrashing the aggregate readahead window.
2195  *
2196  * Both inactive lists should also be large enough that each inactive
2197  * page has a chance to be referenced again before it is reclaimed.
2198  *
2199  * If that fails and refaulting is observed, the inactive list grows.
2200  *
2201  * The inactive_ratio is the target ratio of ACTIVE to INACTIVE pages
2202  * on this LRU, maintained by the pageout code. An inactive_ratio
2203  * of 3 means 3:1 or 25% of the pages are kept on the inactive list.
2204  *
2205  * total     target    max
2206  * memory    ratio     inactive
2207  * -------------------------------------
2208  *   10MB       1         5MB
2209  *  100MB       1        50MB
2210  *    1GB       3       250MB
2211  *   10GB      10       0.9GB
2212  *  100GB      31         3GB
2213  *    1TB     101        10GB
2214  *   10TB     320        32GB
2215  */
2216 static bool inactive_is_low(struct lruvec *lruvec, enum lru_list inactive_lru)
2217 {
2218         enum lru_list active_lru = inactive_lru + LRU_ACTIVE;
2219         unsigned long inactive, active;
2220         unsigned long inactive_ratio;
2221         unsigned long gb;
2222
2223         inactive = lruvec_page_state(lruvec, NR_LRU_BASE + inactive_lru);
2224         active = lruvec_page_state(lruvec, NR_LRU_BASE + active_lru);
2225
2226         gb = (inactive + active) >> (30 - PAGE_SHIFT);
2227         if (gb)
2228                 inactive_ratio = int_sqrt(10 * gb);
2229         else
2230                 inactive_ratio = 1;
2231
2232         return inactive * inactive_ratio < active;
2233 }
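
/*
 * Worked example for the ratio above (hypothetical sizes): with roughly
 * 1GB of pages on this LRU pair, gb = 1 and int_sqrt(10 * 1) = 3, so the
 * inactive list is considered low once active > 3 * inactive, i.e. once
 * less than a quarter of the pages (~250MB) are inactive - matching the
 * 1GB row of the table. At 100GB, int_sqrt(1000) = 31 caps the inactive
 * list at roughly 1/32 of the total (~3GB).
 */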
2234
2235 enum scan_balance {
2236         SCAN_EQUAL,
2237         SCAN_FRACT,
2238         SCAN_ANON,
2239         SCAN_FILE,
2240 };
2241
2242 static void prepare_scan_count(pg_data_t *pgdat, struct scan_control *sc)
2243 {
2244         unsigned long file;
2245         struct lruvec *target_lruvec;
2246
2247         if (lru_gen_enabled())
2248                 return;
2249
2250         target_lruvec = mem_cgroup_lruvec(sc->target_mem_cgroup, pgdat);
2251
2252         /*
2253          * Target desirable inactive:active list ratios for the anon
2254          * and file LRU lists.
2255          */
2256         if (!sc->force_deactivate) {
2257                 unsigned long refaults;
2258
2259                 if (inactive_is_low(target_lruvec, LRU_INACTIVE_ANON))
2260                         sc->may_deactivate |= DEACTIVATE_ANON;
2261                 else
2262                         sc->may_deactivate &= ~DEACTIVATE_ANON;
2263
2264                 /*
2265                  * When refaults are being observed, it means a new
2266                  * workingset is being established. Deactivate to get
2267                  * rid of any stale active pages quickly.
2268                  */
2269                 refaults = lruvec_page_state(target_lruvec,
2270                                              WORKINGSET_ACTIVATE);
2271                 if (refaults != target_lruvec->refaults ||
2272                     inactive_is_low(target_lruvec, LRU_INACTIVE_FILE))
2273                         sc->may_deactivate |= DEACTIVATE_FILE;
2274                 else
2275                         sc->may_deactivate &= ~DEACTIVATE_FILE;
2276         } else
2277                 sc->may_deactivate = DEACTIVATE_ANON | DEACTIVATE_FILE;
2278
2279         /*
2280          * If we have plenty of inactive file pages that aren't
2281          * thrashing, try to reclaim those first before touching
2282          * anonymous pages.
2283          */
2284         file = lruvec_page_state(target_lruvec, NR_INACTIVE_FILE);
2285         if (file >> sc->priority && !(sc->may_deactivate & DEACTIVATE_FILE))
2286                 sc->cache_trim_mode = 1;
2287         else
2288                 sc->cache_trim_mode = 0;
2289
2290         /*
2291          * Prevent the reclaimer from falling into the cache trap: as
2292          * cache pages start out inactive, every cache fault will tip
2293          * the scan balance towards the file LRU.  And as the file LRU
2294          * shrinks, so does the window for rotation from references.
2295          * This means we have a runaway feedback loop where a tiny
2296          * thrashing file LRU becomes infinitely more attractive than
2297          * anon pages.  Try to detect this based on file LRU size.
2298          */
2299         if (!cgroup_reclaim(sc)) {
2300                 unsigned long total_high_wmark = 0;
2301                 unsigned long free, anon;
2302                 int z;
2303
2304                 free = sum_zone_node_page_state(pgdat->node_id, NR_FREE_PAGES);
2305                 file = node_page_state(pgdat, NR_ACTIVE_FILE) +
2306                            node_page_state(pgdat, NR_INACTIVE_FILE);
2307
2308                 for (z = 0; z < MAX_NR_ZONES; z++) {
2309                         struct zone *zone = &pgdat->node_zones[z];
2310
2311                         if (!managed_zone(zone))
2312                                 continue;
2313
2314                         total_high_wmark += high_wmark_pages(zone);
2315                 }
2316
2317                 /*
2318                  * Consider anon: if that's low too, this isn't a
2319                  * runaway file reclaim problem, but rather just
2320                  * extreme pressure. Reclaim as per usual then.
2321                  */
2322                 anon = node_page_state(pgdat, NR_INACTIVE_ANON);
2323
2324                 sc->file_is_tiny =
2325                         file + free <= total_high_wmark &&
2326                         !(sc->may_deactivate & DEACTIVATE_ANON) &&
2327                         anon >> sc->priority;
2328         }
2329 }
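
/*
 * Two hypothetical snapshots to illustrate the heuristics above:
 *
 * - cache_trim_mode: at DEF_PRIORITY (12), 8192 inactive file pages give
 *   8192 >> 12 = 2, so if DEACTIVATE_FILE is not set the node is treated
 *   as having enough inactive cache to trim before touching anon.
 *
 * - file_is_tiny: with 10000 file pages, 20000 free pages and zone high
 *   watermarks summing to 40000, file + free (30000) is below the
 *   watermark total; if DEACTIVATE_ANON is clear and inactive anon
 *   (say 200000 >> 12 = 48) is non-zero, get_scan_count() below picks
 *   SCAN_ANON and reclaim goes after anonymous pages.
 */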
2330
2331 /*
2332  * Determine how aggressively the anon and file LRU lists should be
2333  * scanned.  The relative value of each set of LRU lists is determined
2334  * by looking at the fraction of the pages scanned we did rotate back
2335  * onto the active list instead of evict.
2336  *
2337  * nr[0] = anon inactive pages to scan; nr[1] = anon active pages to scan
2338  * nr[2] = file inactive pages to scan; nr[3] = file active pages to scan
2339  */
2340 static void get_scan_count(struct lruvec *lruvec, struct scan_control *sc,
2341                            unsigned long *nr)
2342 {
2343         struct mem_cgroup *memcg = lruvec_memcg(lruvec);
2344         int swappiness = mem_cgroup_swappiness(memcg);
2345         struct zone_reclaim_stat *reclaim_stat = &lruvec->reclaim_stat;
2346         u64 fraction[ANON_AND_FILE];
2347         u64 denominator = 0;    /* gcc */
2348         struct pglist_data *pgdat = lruvec_pgdat(lruvec);
2349         unsigned long anon_prio, file_prio;
2350         enum scan_balance scan_balance;
2351         unsigned long anon, file;
2352         unsigned long ap, fp;
2353         enum lru_list lru;
2354
2355         /* If we have no swap space, do not bother scanning anon pages. */
2356         if (!sc->may_swap || mem_cgroup_get_nr_swap_pages(memcg) <= 0) {
2357                 scan_balance = SCAN_FILE;
2358                 goto out;
2359         }
2360
2361         /*
2362          * Global reclaim will swap to prevent OOM even with no
2363          * swappiness, but memcg users want to use this knob to
2364          * disable swapping for individual groups completely when
2365          * using the memory controller's swap limit feature would be
2366          * too expensive.
2367          */
2368         if (cgroup_reclaim(sc) && !swappiness) {
2369                 scan_balance = SCAN_FILE;
2370                 goto out;
2371         }
2372
2373         /*
2374          * Do not apply any pressure balancing cleverness when the
2375          * system is close to OOM, scan both anon and file equally
2376          * (unless the swappiness setting disagrees with swapping).
2377          */
2378         if (!sc->priority && swappiness) {
2379                 scan_balance = SCAN_EQUAL;
2380                 goto out;
2381         }
2382
2383         /*
2384          * If the system is almost out of file pages, force-scan anon.
2385          */
2386         if (sc->file_is_tiny) {
2387                 scan_balance = SCAN_ANON;
2388                 goto out;
2389         }
2390
2391         /*
2392          * If there is enough inactive page cache, we do not reclaim
2393          * anything from the anonymous working set right now.
2394          */
2395         if (sc->cache_trim_mode) {
2396                 scan_balance = SCAN_FILE;
2397                 goto out;
2398         }
2399
2400         scan_balance = SCAN_FRACT;
2401
2402         /*
2403          * With swappiness at 100, anonymous and file have the same priority.
2404          * This scanning priority is essentially the inverse of IO cost.
2405          */
2406         anon_prio = swappiness;
2407         file_prio = 200 - anon_prio;
2408
2409         /*
2410          * OK, so we have swap space and a fair amount of page cache
2411          * pages.  We use the recently rotated / recently scanned
2412          * ratios to determine how valuable each cache is.
2413          *
2414          * Because workloads change over time (and to avoid overflow)
2415          * we keep these statistics as a floating average, which ends
2416          * up weighing recent references more than old ones.
2417          *
2418          * anon in [0], file in [1]
2419          */
2420
2421         anon  = lruvec_lru_size(lruvec, LRU_ACTIVE_ANON, MAX_NR_ZONES) +
2422                 lruvec_lru_size(lruvec, LRU_INACTIVE_ANON, MAX_NR_ZONES);
2423         file  = lruvec_lru_size(lruvec, LRU_ACTIVE_FILE, MAX_NR_ZONES) +
2424                 lruvec_lru_size(lruvec, LRU_INACTIVE_FILE, MAX_NR_ZONES);
2425
2426         spin_lock_irq(&pgdat->lru_lock);
2427         if (unlikely(reclaim_stat->recent_scanned[0] > anon / 4)) {
2428                 reclaim_stat->recent_scanned[0] /= 2;
2429                 reclaim_stat->recent_rotated[0] /= 2;
2430         }
2431
2432         if (unlikely(reclaim_stat->recent_scanned[1] > file / 4)) {
2433                 reclaim_stat->recent_scanned[1] /= 2;
2434                 reclaim_stat->recent_rotated[1] /= 2;
2435         }
2436
2437         /*
2438          * The amount of pressure on anon vs file pages is inversely
2439          * proportional to the fraction of recently scanned pages on
2440          * each list that were recently referenced and in active use.
2441          */
2442         ap = anon_prio * (reclaim_stat->recent_scanned[0] + 1);
2443         ap /= reclaim_stat->recent_rotated[0] + 1;
2444
2445         fp = file_prio * (reclaim_stat->recent_scanned[1] + 1);
2446         fp /= reclaim_stat->recent_rotated[1] + 1;
2447         spin_unlock_irq(&pgdat->lru_lock);
2448
2449         fraction[0] = ap;
2450         fraction[1] = fp;
2451         denominator = ap + fp + 1;
2452 out:
2453         for_each_evictable_lru(lru) {
2454                 int file = is_file_lru(lru);
2455                 unsigned long lruvec_size;
2456                 unsigned long scan;
2457                 unsigned long protection;
2458
2459                 lruvec_size = lruvec_lru_size(lruvec, lru, sc->reclaim_idx);
2460                 protection = mem_cgroup_protection(memcg,
2461                                                    sc->memcg_low_reclaim);
2462
2463                 if (protection) {
2464                         /*
2465                          * Scale a cgroup's reclaim pressure by proportioning
2466                          * its current usage to its memory.low or memory.min
2467                          * setting.
2468                          *
2469                          * This is important, as otherwise scanning aggression
2470                          * becomes extremely binary -- from nothing as we
2471                          * approach the memory protection threshold, to totally
2472                          * nominal as we exceed it.  This results in requiring
2473                          * setting extremely liberal protection thresholds. It
2474                          * also means we simply get no protection at all if we
2475                          * set it too low, which is not ideal.
2476                          *
2477                          * If there is any protection in place, we reduce scan
2478                          * pressure by how much of the total memory used is
2479                          * within protection thresholds.
2480                          *
2481                          * There is one special case: in the first reclaim pass,
2482                          * we skip over all groups that are within their low
2483                          * protection. If that fails to reclaim enough pages to
2484                          * satisfy the reclaim goal, we come back and override
2485                          * the best-effort low protection. However, we still
2486                          * ideally want to honor how well-behaved groups are in
2487                          * that case instead of simply punishing them all
2488                          * equally. As such, we reclaim them based on how much
2489                          * memory they are using, reducing the scan pressure
2490                          * again by how much of the total memory used is under
2491                          * hard protection.
2492                          */
2493                         unsigned long cgroup_size = mem_cgroup_size(memcg);
2494
2495                         /* Avoid TOCTOU with earlier protection check */
2496                         cgroup_size = max(cgroup_size, protection);
2497
2498                         scan = lruvec_size - lruvec_size * protection /
2499                                 cgroup_size;
2500
2501                         /*
2502                          * Minimally target SWAP_CLUSTER_MAX pages to keep
2503                          * reclaim moving forwards, avoiding decrementing
2504                          * sc->priority further than desirable.
2505                          */
2506                         scan = max(scan, SWAP_CLUSTER_MAX);
2507                 } else {
2508                         scan = lruvec_size;
2509                 }
2510
2511                 scan >>= sc->priority;
2512
2513                 /*
2514                  * If the cgroup's already been deleted, make sure to
2515                  * scrape out the remaining cache.
2516                  */
2517                 if (!scan && !mem_cgroup_online(memcg))
2518                         scan = min(lruvec_size, SWAP_CLUSTER_MAX);
2519
2520                 switch (scan_balance) {
2521                 case SCAN_EQUAL:
2522                         /* Scan lists relative to size */
2523                         break;
2524                 case SCAN_FRACT:
2525                         /*
2526                          * Scan types proportional to swappiness and
2527                          * their relative recent reclaim efficiency.
2528                          * Make sure we don't miss the last page on
2529                          * offlined memory cgroups because of a
2530                          * round-off error.
2531                          */
2532                         scan = mem_cgroup_online(memcg) ?
2533                                div64_u64(scan * fraction[file], denominator) :
2534                                DIV64_U64_ROUND_UP(scan * fraction[file],
2535                                                   denominator);
2536                         break;
2537                 case SCAN_FILE:
2538                 case SCAN_ANON:
2539                         /* Scan one type exclusively */
2540                         if ((scan_balance == SCAN_FILE) != file)
2541                                 scan = 0;
2542                         break;
2543                 default:
2544                         /* Look ma, no brain */
2545                         BUG();
2546                 }
2547
2548                 nr[lru] = scan;
2549         }
2550 }
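
/*
 * A worked SCAN_FRACT example (hypothetical numbers): with swappiness 60,
 * anon_prio = 60 and file_prio = 140. If recent_scanned/recent_rotated are
 * 1000/500 for anon and 4000/100 for file, then
 *
 *	ap = 60 * 1001 / 501  = 119
 *	fp = 140 * 4001 / 101 = 5545
 *	denominator = 119 + 5545 + 1 = 5665
 *
 * For an unprotected file LRU of 1048576 pages at priority 12, the base
 * scan target is 1048576 >> 12 = 256 pages, scaled to 256 * 5545 / 5665
 * = 250, while an anon LRU of the same size would get 256 * 119 / 5665
 * = 5. File pages thus see most of the pressure because far fewer of the
 * recently scanned file pages were rotated back to the active list.
 */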
2551
2552 #ifdef CONFIG_LRU_GEN
2553
2554 #ifdef CONFIG_LRU_GEN_ENABLED
2555 DEFINE_STATIC_KEY_ARRAY_TRUE(lru_gen_caps, NR_LRU_GEN_CAPS);
2556 #define get_cap(cap)    static_branch_likely(&lru_gen_caps[cap])
2557 #else
2558 DEFINE_STATIC_KEY_ARRAY_FALSE(lru_gen_caps, NR_LRU_GEN_CAPS);
2559 #define get_cap(cap)    static_branch_unlikely(&lru_gen_caps[cap])
2560 #endif
2561
2562 /******************************************************************************
2563  *                          shorthand helpers
2564  ******************************************************************************/
2565
2566 #define LRU_REFS_FLAGS  (BIT(PG_referenced) | BIT(PG_workingset))
2567
2568 #define DEFINE_MAX_SEQ(lruvec)                                          \
2569         unsigned long max_seq = READ_ONCE((lruvec)->lrugen.max_seq)
2570
2571 #define DEFINE_MIN_SEQ(lruvec)                                          \
2572         unsigned long min_seq[ANON_AND_FILE] = {                        \
2573                 READ_ONCE((lruvec)->lrugen.min_seq[LRU_GEN_ANON]),      \
2574                 READ_ONCE((lruvec)->lrugen.min_seq[LRU_GEN_FILE]),      \
2575         }
2576
2577 #define for_each_gen_type_zone(gen, type, zone)                         \
2578         for ((gen) = 0; (gen) < MAX_NR_GENS; (gen)++)                   \
2579                 for ((type) = 0; (type) < ANON_AND_FILE; (type)++)      \
2580                         for ((zone) = 0; (zone) < MAX_NR_ZONES; (zone)++)
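
/*
 * Usage sketch for the helpers above (illustrative only, guarded out):
 * the sequence counters are snapshotted once with DEFINE_MAX_SEQ() /
 * DEFINE_MIN_SEQ() and then used as plain locals, mirroring get_nr_gens()
 * further down. The function name is hypothetical.
 */
#if 0
static int example_nr_gens(struct lruvec *lruvec, int type)
{
	DEFINE_MAX_SEQ(lruvec);
	DEFINE_MIN_SEQ(lruvec);

	return max_seq - min_seq[type] + 1;
}
#endif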
2581
2582 static struct lruvec *get_lruvec(struct mem_cgroup *memcg, int nid)
2583 {
2584         struct pglist_data *pgdat = NODE_DATA(nid);
2585
2586 #ifdef CONFIG_MEMCG
2587         if (memcg) {
2588                 struct lruvec *lruvec = &memcg->nodeinfo[nid]->lruvec;
2589
2590                 /* for hotadd_new_pgdat() */
2591                 if (!lruvec->pgdat)
2592                         lruvec->pgdat = pgdat;
2593
2594                 return lruvec;
2595         }
2596 #endif
2597         VM_WARN_ON_ONCE(!mem_cgroup_disabled());
2598
2599         return pgdat ? &pgdat->__lruvec : NULL;
2600 }
2601
2602 static int get_swappiness(struct lruvec *lruvec, struct scan_control *sc)
2603 {
2604         struct mem_cgroup *memcg = lruvec_memcg(lruvec);
2605         /* struct pglist_data *pgdat = lruvec_pgdat(lruvec); */
2606
2607         /* FIXME: see a2a36488a61c + 26aa2d199d6f */
2608         if (/* !can_demote(pgdat->node_id, sc) && */
2609             mem_cgroup_get_nr_swap_pages(memcg) < MIN_LRU_BATCH)
2610                 return 0;
2611
2612         return mem_cgroup_swappiness(memcg);
2613 }
2614
2615 static int get_nr_gens(struct lruvec *lruvec, int type)
2616 {
2617         return lruvec->lrugen.max_seq - lruvec->lrugen.min_seq[type] + 1;
2618 }
2619
2620 static bool __maybe_unused seq_is_valid(struct lruvec *lruvec)
2621 {
2622         /* see the comment on lru_gen_struct */
2623         return get_nr_gens(lruvec, LRU_GEN_FILE) >= MIN_NR_GENS &&
2624                get_nr_gens(lruvec, LRU_GEN_FILE) <= get_nr_gens(lruvec, LRU_GEN_ANON) &&
2625                get_nr_gens(lruvec, LRU_GEN_ANON) <= MAX_NR_GENS;
2626 }
2627
2628 /******************************************************************************
2629  *                          mm_struct list
2630  ******************************************************************************/
2631
2632 static struct lru_gen_mm_list *get_mm_list(struct mem_cgroup *memcg)
2633 {
2634         static struct lru_gen_mm_list mm_list = {
2635                 .fifo = LIST_HEAD_INIT(mm_list.fifo),
2636                 .lock = __SPIN_LOCK_UNLOCKED(mm_list.lock),
2637         };
2638
2639 #ifdef CONFIG_MEMCG
2640         if (memcg)
2641                 return &memcg->mm_list;
2642 #endif
2643         VM_WARN_ON_ONCE(!mem_cgroup_disabled());
2644
2645         return &mm_list;
2646 }
2647
2648 void lru_gen_add_mm(struct mm_struct *mm)
2649 {
2650         int nid;
2651         struct mem_cgroup *memcg = get_mem_cgroup_from_mm(mm);
2652         struct lru_gen_mm_list *mm_list = get_mm_list(memcg);
2653
2654         VM_WARN_ON_ONCE(!list_empty(&mm->lru_gen.list));
2655 #ifdef CONFIG_MEMCG
2656         VM_WARN_ON_ONCE(mm->lru_gen.memcg);
2657         mm->lru_gen.memcg = memcg;
2658 #endif
2659         spin_lock(&mm_list->lock);
2660
2661         for_each_node_state(nid, N_MEMORY) {
2662                 struct lruvec *lruvec = get_lruvec(memcg, nid);
2663
2664                 if (!lruvec)
2665                         continue;
2666
2667                 /* the first addition since the last iteration */
2668                 if (lruvec->mm_state.tail == &mm_list->fifo)
2669                         lruvec->mm_state.tail = &mm->lru_gen.list;
2670         }
2671
2672         list_add_tail(&mm->lru_gen.list, &mm_list->fifo);
2673
2674         spin_unlock(&mm_list->lock);
2675 }
2676
2677 void lru_gen_del_mm(struct mm_struct *mm)
2678 {
2679         int nid;
2680         struct lru_gen_mm_list *mm_list;
2681         struct mem_cgroup *memcg = NULL;
2682
2683         if (list_empty(&mm->lru_gen.list))
2684                 return;
2685
2686 #ifdef CONFIG_MEMCG
2687         memcg = mm->lru_gen.memcg;
2688 #endif
2689         mm_list = get_mm_list(memcg);
2690
2691         spin_lock(&mm_list->lock);
2692
2693         for_each_node(nid) {
2694                 struct lruvec *lruvec = get_lruvec(memcg, nid);
2695
2696                 if (!lruvec)
2697                         continue;
2698
2699                 /* where the current iteration continues after */
2700                 if (lruvec->mm_state.head == &mm->lru_gen.list)
2701                         lruvec->mm_state.head = lruvec->mm_state.head->prev;
2702
2703                 /* where the last iteration ended before */
2704                 if (lruvec->mm_state.tail == &mm->lru_gen.list)
2705                         lruvec->mm_state.tail = lruvec->mm_state.tail->next;
2706         }
2707
2708         list_del_init(&mm->lru_gen.list);
2709
2710         spin_unlock(&mm_list->lock);
2711
2712 #ifdef CONFIG_MEMCG
2713         mem_cgroup_put(mm->lru_gen.memcg);
2714         mm->lru_gen.memcg = NULL;
2715 #endif
2716 }
2717
2718 #ifdef CONFIG_MEMCG
2719 void lru_gen_migrate_mm(struct mm_struct *mm)
2720 {
2721         struct mem_cgroup *memcg;
2722         struct task_struct *task = rcu_dereference_protected(mm->owner, true);
2723
2724         VM_WARN_ON_ONCE(task->mm != mm);
2725         lockdep_assert_held(&task->alloc_lock);
2726
2727         /* for mm_update_next_owner() */
2728         if (mem_cgroup_disabled())
2729                 return;
2730
2731         rcu_read_lock();
2732         memcg = mem_cgroup_from_task(task);
2733         rcu_read_unlock();
2734         if (memcg == mm->lru_gen.memcg)
2735                 return;
2736
2737         VM_WARN_ON_ONCE(!mm->lru_gen.memcg);
2738         VM_WARN_ON_ONCE(list_empty(&mm->lru_gen.list));
2739
2740         lru_gen_del_mm(mm);
2741         lru_gen_add_mm(mm);
2742 }
2743 #endif
2744
2745 /*
2746  * Bloom filters with m=1<<15, k=2 and the false positive rates of ~1/5 when
2747  * n=10,000 and ~1/2 when n=20,000, where, conventionally, m is the number of
2748  * bits in a bitmap, k is the number of hash functions and n is the number of
2749  * inserted items.
2750  *
2751  * Page table walkers use one of the two filters to reduce their search space.
2752  * To get rid of non-leaf entries that no longer have enough leaf entries, the
2753  * aging uses the double-buffering technique to flip to the other filter each
2754  * time it produces a new generation. For non-leaf entries that have enough
2755  * leaf entries, the aging carries them over to the next generation in
2756  * walk_pmd_range(); the eviction also report them when walking the rmap
2757  * in lru_gen_look_around().
2758  *
2759  * For future optimizations:
2760  * 1. It's not necessary to keep both filters all the time. The spare one can be
2761  *    freed after the RCU grace period and reallocated if needed again.
2762  * 2. And when reallocating, it's worth scaling its size according to the number
2763  *    of inserted entries in the other filter, to reduce the memory overhead on
2764  *    small systems and false positives on large systems.
2765  * 3. Jenkins' hash function is an alternative to Knuth's.
2766  */
2767 #define BLOOM_FILTER_SHIFT      15
2768
2769 static inline int filter_gen_from_seq(unsigned long seq)
2770 {
2771         return seq % NR_BLOOM_FILTERS;
2772 }
2773
2774 static void get_item_key(void *item, int *key)
2775 {
2776         u32 hash = hash_ptr(item, BLOOM_FILTER_SHIFT * 2);
2777
2778         BUILD_BUG_ON(BLOOM_FILTER_SHIFT * 2 > BITS_PER_TYPE(u32));
2779
2780         key[0] = hash & (BIT(BLOOM_FILTER_SHIFT) - 1);
2781         key[1] = hash >> BLOOM_FILTER_SHIFT;
2782 }
2783
2784 static void reset_bloom_filter(struct lruvec *lruvec, unsigned long seq)
2785 {
2786         unsigned long *filter;
2787         int gen = filter_gen_from_seq(seq);
2788
2789         filter = lruvec->mm_state.filters[gen];
2790         if (filter) {
2791                 bitmap_clear(filter, 0, BIT(BLOOM_FILTER_SHIFT));
2792                 return;
2793         }
2794
2795         filter = bitmap_zalloc(BIT(BLOOM_FILTER_SHIFT),
2796                                __GFP_HIGH | __GFP_NOMEMALLOC | __GFP_NOWARN);
2797         WRITE_ONCE(lruvec->mm_state.filters[gen], filter);
2798 }
2799
2800 static void update_bloom_filter(struct lruvec *lruvec, unsigned long seq, void *item)
2801 {
2802         int key[2];
2803         unsigned long *filter;
2804         int gen = filter_gen_from_seq(seq);
2805
2806         filter = READ_ONCE(lruvec->mm_state.filters[gen]);
2807         if (!filter)
2808                 return;
2809
2810         get_item_key(item, key);
2811
2812         if (!test_bit(key[0], filter))
2813                 set_bit(key[0], filter);
2814         if (!test_bit(key[1], filter))
2815                 set_bit(key[1], filter);
2816 }
2817
2818 static bool test_bloom_filter(struct lruvec *lruvec, unsigned long seq, void *item)
2819 {
2820         int key[2];
2821         unsigned long *filter;
2822         int gen = filter_gen_from_seq(seq);
2823
2824         filter = READ_ONCE(lruvec->mm_state.filters[gen]);
2825         if (!filter)
2826                 return true;
2827
2828         get_item_key(item, key);
2829
2830         return test_bit(key[0], filter) && test_bit(key[1], filter);
2831 }
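
/*
 * Illustrative sketch (not a new code path): walk_pmd_range() below tests
 * non-leaf entries against the filter of the current generation and, for PTE
 * tables that still have enough young entries, records them into the filter
 * of the next generation, roughly:
 *
 *	if (test_bloom_filter(walk->lruvec, walk->max_seq, pmd + i) &&
 *	    walk_pte_range(&val, addr, next, args))
 *		update_bloom_filter(walk->lruvec, walk->max_seq + 1, pmd + i);
 *
 * A false positive only costs an extra scan of one PTE table; it cannot
 * affect correctness.
 */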
2832
2833 static void reset_mm_stats(struct lruvec *lruvec, struct lru_gen_mm_walk *walk, bool last)
2834 {
2835         int i;
2836         int hist;
2837
2838         lockdep_assert_held(&get_mm_list(lruvec_memcg(lruvec))->lock);
2839
2840         if (walk) {
2841                 hist = lru_hist_from_seq(walk->max_seq);
2842
2843                 for (i = 0; i < NR_MM_STATS; i++) {
2844                         WRITE_ONCE(lruvec->mm_state.stats[hist][i],
2845                                    lruvec->mm_state.stats[hist][i] + walk->mm_stats[i]);
2846                         walk->mm_stats[i] = 0;
2847                 }
2848         }
2849
2850         if (NR_HIST_GENS > 1 && last) {
2851                 hist = lru_hist_from_seq(lruvec->mm_state.seq + 1);
2852
2853                 for (i = 0; i < NR_MM_STATS; i++)
2854                         WRITE_ONCE(lruvec->mm_state.stats[hist][i], 0);
2855         }
2856 }
2857
2858 static bool should_skip_mm(struct mm_struct *mm, struct lru_gen_mm_walk *walk)
2859 {
2860         int type;
2861         unsigned long size = 0;
2862         struct pglist_data *pgdat = lruvec_pgdat(walk->lruvec);
2863         int key = pgdat->node_id % BITS_PER_TYPE(mm->lru_gen.bitmap);
2864
2865         if (!walk->force_scan && !test_bit(key, &mm->lru_gen.bitmap))
2866                 return true;
2867
2868         clear_bit(key, &mm->lru_gen.bitmap);
2869
2870         for (type = !walk->can_swap; type < ANON_AND_FILE; type++) {
2871                 size += type ? get_mm_counter(mm, MM_FILEPAGES) :
2872                                get_mm_counter(mm, MM_ANONPAGES) +
2873                                get_mm_counter(mm, MM_SHMEMPAGES);
2874         }
2875
2876         if (size < MIN_LRU_BATCH)
2877                 return true;
2878
2879         return !mmget_not_zero(mm);
2880 }
2881
2882 static bool iterate_mm_list(struct lruvec *lruvec, struct lru_gen_mm_walk *walk,
2883                             struct mm_struct **iter)
2884 {
2885         bool first = false;
2886         bool last = false;
2887         struct mm_struct *mm = NULL;
2888         struct mem_cgroup *memcg = lruvec_memcg(lruvec);
2889         struct lru_gen_mm_list *mm_list = get_mm_list(memcg);
2890         struct lru_gen_mm_state *mm_state = &lruvec->mm_state;
2891
2892         /*
2893          * mm_state->seq is incremented after each iteration of mm_list. There
2894          * are three interesting cases for this page table walker:
2895          * 1. It tries to start a new iteration with a stale max_seq: there is
2896          *    nothing left to do.
2897          * 2. It started the next iteration: it needs to reset the Bloom filter
2898          *    so that a fresh set of PTE tables can be recorded.
2899          * 3. It ended the current iteration: it needs to reset the mm stats
2900          *    counters and tell its caller to increment max_seq.
2901          */
2902         spin_lock(&mm_list->lock);
2903
2904         VM_WARN_ON_ONCE(mm_state->seq + 1 < walk->max_seq);
2905
2906         if (walk->max_seq <= mm_state->seq)
2907                 goto done;
2908
2909         if (!mm_state->head)
2910                 mm_state->head = &mm_list->fifo;
2911
2912         if (mm_state->head == &mm_list->fifo)
2913                 first = true;
2914
2915         do {
2916                 mm_state->head = mm_state->head->next;
2917                 if (mm_state->head == &mm_list->fifo) {
2918                         WRITE_ONCE(mm_state->seq, mm_state->seq + 1);
2919                         last = true;
2920                         break;
2921                 }
2922
2923                 /* force scan for those added after the last iteration */
2924                 if (!mm_state->tail || mm_state->tail == mm_state->head) {
2925                         mm_state->tail = mm_state->head->next;
2926                         walk->force_scan = true;
2927                 }
2928
2929                 mm = list_entry(mm_state->head, struct mm_struct, lru_gen.list);
2930                 if (should_skip_mm(mm, walk))
2931                         mm = NULL;
2932         } while (!mm);
2933 done:
2934         if (*iter || last)
2935                 reset_mm_stats(lruvec, walk, last);
2936
2937         spin_unlock(&mm_list->lock);
2938
2939         if (mm && first)
2940                 reset_bloom_filter(lruvec, walk->max_seq + 1);
2941
2942         if (*iter)
2943                 mmput_async(*iter);
2944
2945         *iter = mm;
2946
2947         return last;
2948 }
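
/*
 * Typical caller pattern (see try_to_inc_max_seq() below): keep walking the
 * mm_structs handed out by iterate_mm_list() until the list wraps around, at
 * which point the final call has already bumped mm_state->seq and reset the
 * stats for the next iteration:
 *
 *	struct mm_struct *mm = NULL;
 *
 *	do {
 *		success = iterate_mm_list(lruvec, walk, &mm);
 *		if (mm)
 *			walk_mm(lruvec, mm, walk);
 *	} while (mm);
 */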
2949
2950 static bool iterate_mm_list_nowalk(struct lruvec *lruvec, unsigned long max_seq)
2951 {
2952         bool success = false;
2953         struct mem_cgroup *memcg = lruvec_memcg(lruvec);
2954         struct lru_gen_mm_list *mm_list = get_mm_list(memcg);
2955         struct lru_gen_mm_state *mm_state = &lruvec->mm_state;
2956
2957         spin_lock(&mm_list->lock);
2958
2959         VM_WARN_ON_ONCE(mm_state->seq + 1 < max_seq);
2960
2961         if (max_seq > mm_state->seq) {
2962                 mm_state->head = NULL;
2963                 mm_state->tail = NULL;
2964                 WRITE_ONCE(mm_state->seq, mm_state->seq + 1);
2965                 reset_mm_stats(lruvec, NULL, true);
2966                 success = true;
2967         }
2968
2969         spin_unlock(&mm_list->lock);
2970
2971         return success;
2972 }
2973
2974 /******************************************************************************
2975  *                          refault feedback loop
2976  ******************************************************************************/
2977
2978 /*
2979  * A feedback loop based on Proportional-Integral-Derivative (PID) controller.
2980  *
2981  * The P term is refaulted/(evicted+protected) from a tier in the generation
2982  * currently being evicted; the I term is the exponential moving average of the
2983  * P term over the generations previously evicted, using the smoothing factor
2984  * 1/2; the D term isn't supported.
2985  *
2986  * The setpoint (SP) is always the first tier of one type; the process variable
2987  * (PV) is either any tier of the other type or any other tier of the same
2988  * type.
2989  *
2990  * The error is the difference between the SP and the PV; the correction is to
2991  * turn off protection when SP>PV or turn on protection when SP<PV.
2992  *
2993  * For future optimizations:
2994  * 1. The D term may discount the other two terms over time so that long-lived
2995  *    generations can resist stale information.
2996  */
2997 struct ctrl_pos {
2998         unsigned long refaulted;
2999         unsigned long total;
3000         int gain;
3001 };
3002
3003 static void read_ctrl_pos(struct lruvec *lruvec, int type, int tier, int gain,
3004                           struct ctrl_pos *pos)
3005 {
3006         struct lru_gen_struct *lrugen = &lruvec->lrugen;
3007         int hist = lru_hist_from_seq(lrugen->min_seq[type]);
3008
3009         pos->refaulted = lrugen->avg_refaulted[type][tier] +
3010                          atomic_long_read(&lrugen->refaulted[hist][type][tier]);
3011         pos->total = lrugen->avg_total[type][tier] +
3012                      atomic_long_read(&lrugen->evicted[hist][type][tier]);
3013         if (tier)
3014                 pos->total += lrugen->protected[hist][type][tier - 1];
3015         pos->gain = gain;
3016 }
3017
3018 static void reset_ctrl_pos(struct lruvec *lruvec, int type, bool carryover)
3019 {
3020         int hist, tier;
3021         struct lru_gen_struct *lrugen = &lruvec->lrugen;
3022         bool clear = carryover ? NR_HIST_GENS == 1 : NR_HIST_GENS > 1;
3023         unsigned long seq = carryover ? lrugen->min_seq[type] : lrugen->max_seq + 1;
3024
3025         lockdep_assert_held(&lruvec_pgdat(lruvec)->lru_lock);
3026
3027         if (!carryover && !clear)
3028                 return;
3029
3030         hist = lru_hist_from_seq(seq);
3031
3032         for (tier = 0; tier < MAX_NR_TIERS; tier++) {
3033                 if (carryover) {
3034                         unsigned long sum;
3035
3036                         sum = lrugen->avg_refaulted[type][tier] +
3037                               atomic_long_read(&lrugen->refaulted[hist][type][tier]);
3038                         WRITE_ONCE(lrugen->avg_refaulted[type][tier], sum / 2);
3039
3040                         sum = lrugen->avg_total[type][tier] +
3041                               atomic_long_read(&lrugen->evicted[hist][type][tier]);
3042                         if (tier)
3043                                 sum += lrugen->protected[hist][type][tier - 1];
3044                         WRITE_ONCE(lrugen->avg_total[type][tier], sum / 2);
3045                 }
3046
3047                 if (clear) {
3048                         atomic_long_set(&lrugen->refaulted[hist][type][tier], 0);
3049                         atomic_long_set(&lrugen->evicted[hist][type][tier], 0);
3050                         if (tier)
3051                                 WRITE_ONCE(lrugen->protected[hist][type][tier - 1], 0);
3052                 }
3053         }
3054 }
3055
3056 static bool positive_ctrl_err(struct ctrl_pos *sp, struct ctrl_pos *pv)
3057 {
3058         /*
3059          * Return true if the PV has a limited number of refaults or a lower
3060          * refaulted/total than the SP.
3061          */
3062         return pv->refaulted < MIN_LRU_BATCH ||
3063                pv->refaulted * (sp->total + MIN_LRU_BATCH) * sp->gain <=
3064                (sp->refaulted + 1) * pv->total * pv->gain;
3065 }
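
/*
 * Worked example (illustrative, assuming MIN_LRU_BATCH is 64 on a 64-bit
 * system): with sp = { .refaulted = 10, .total = 1000, .gain = 1 } and
 * pv = { .refaulted = 100, .total = 1000, .gain = 1 }, the PV has more than
 * MIN_LRU_BATCH refaults and 100 * (1000 + 64) * 1 = 106400 is not <=
 * (10 + 1) * 1000 * 1 = 11000, so positive_ctrl_err() returns false: the PV
 * refaults more heavily than the SP, which is the "turn on protection when
 * SP<PV" case described above.
 */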
3066
3067 /******************************************************************************
3068  *                          the aging
3069  ******************************************************************************/
3070
3071 /* promote pages accessed through page tables */
3072 static int page_update_gen(struct page *page, int gen)
3073 {
3074         unsigned long new_flags, old_flags = READ_ONCE(page->flags);
3075
3076         VM_WARN_ON_ONCE(gen >= MAX_NR_GENS);
3077         VM_WARN_ON_ONCE(!rcu_read_lock_held());
3078
3079         do {
3080                 /* lru_gen_del_page() has isolated this page? */
3081                 if (!(old_flags & LRU_GEN_MASK)) {
3082                         /* for shrink_page_list() */
3083                         new_flags = old_flags | BIT(PG_referenced);
3084                         continue;
3085                 }
3086
3087                 new_flags = old_flags & ~(LRU_GEN_MASK | LRU_REFS_MASK | LRU_REFS_FLAGS);
3088                 new_flags |= (gen + 1UL) << LRU_GEN_PGOFF;
3089         } while (!try_cmpxchg(&page->flags, &old_flags, new_flags));
3090
3091         return ((old_flags & LRU_GEN_MASK) >> LRU_GEN_PGOFF) - 1;
3092 }
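
/*
 * Note on the encoding: the generation number is stored in page->flags as
 * gen+1 within LRU_GEN_MASK, so a value of 0 means the page is not on a
 * multi-gen LRU list. Hence the -1 above: page_update_gen() returns the old
 * generation, or -1 if lru_gen_del_page() had already isolated the page.
 */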
3093
3094 /* protect pages accessed multiple times through file descriptors */
3095 static int page_inc_gen(struct lruvec *lruvec, struct page *page, bool reclaiming)
3096 {
3097         int type = page_is_file_cache(page);
3098         struct lru_gen_struct *lrugen = &lruvec->lrugen;
3099         int new_gen, old_gen = lru_gen_from_seq(lrugen->min_seq[type]);
3100         unsigned long new_flags, old_flags = READ_ONCE(page->flags);
3101
3102         VM_WARN_ON_ONCE_PAGE(!(old_flags & LRU_GEN_MASK), page);
3103
3104         do {
3105                 new_gen = ((old_flags & LRU_GEN_MASK) >> LRU_GEN_PGOFF) - 1;
3106                 /* page_update_gen() has promoted this page? */
3107                 if (new_gen >= 0 && new_gen != old_gen)
3108                         return new_gen;
3109
3110                 new_gen = (old_gen + 1) % MAX_NR_GENS;
3111
3112                 new_flags = old_flags & ~(LRU_GEN_MASK | LRU_REFS_MASK | LRU_REFS_FLAGS);
3113                 new_flags |= (new_gen + 1UL) << LRU_GEN_PGOFF;
3114                 /* for end_page_writeback() */
3115                 if (reclaiming)
3116                         new_flags |= BIT(PG_reclaim);
3117         } while (!try_cmpxchg(&page->flags, &old_flags, new_flags));
3118
3119         lru_gen_update_size(lruvec, page, old_gen, new_gen);
3120
3121         return new_gen;
3122 }
3123
3124 static void update_batch_size(struct lru_gen_mm_walk *walk, struct page *page,
3125                               int old_gen, int new_gen)
3126 {
3127         int type = page_is_file_cache(page);
3128         int zone = page_zonenum(page);
3129         int delta = hpage_nr_pages(page);
3130
3131         VM_WARN_ON_ONCE(old_gen >= MAX_NR_GENS);
3132         VM_WARN_ON_ONCE(new_gen >= MAX_NR_GENS);
3133
3134         walk->batched++;
3135
3136         walk->nr_pages[old_gen][type][zone] -= delta;
3137         walk->nr_pages[new_gen][type][zone] += delta;
3138 }
3139
3140 static void reset_batch_size(struct lruvec *lruvec, struct lru_gen_mm_walk *walk)
3141 {
3142         int gen, type, zone;
3143         struct lru_gen_struct *lrugen = &lruvec->lrugen;
3144
3145         walk->batched = 0;
3146
3147         for_each_gen_type_zone(gen, type, zone) {
3148                 enum lru_list lru = type * LRU_INACTIVE_FILE;
3149                 int delta = walk->nr_pages[gen][type][zone];
3150
3151                 if (!delta)
3152                         continue;
3153
3154                 walk->nr_pages[gen][type][zone] = 0;
3155                 WRITE_ONCE(lrugen->nr_pages[gen][type][zone],
3156                            lrugen->nr_pages[gen][type][zone] + delta);
3157
3158                 if (lru_gen_is_active(lruvec, gen))
3159                         lru += LRU_ACTIVE;
3160                 __update_lru_size(lruvec, lru, zone, delta);
3161         }
3162 }
3163
3164 static int should_skip_vma(unsigned long start, unsigned long end, struct mm_walk *args)
3165 {
3166         struct address_space *mapping;
3167         struct vm_area_struct *vma = args->vma;
3168         struct lru_gen_mm_walk *walk = args->private;
3169
3170         if (!vma_is_accessible(vma))
3171                 return true;
3172
3173         if (is_vm_hugetlb_page(vma))
3174                 return true;
3175
3176         if (vma->vm_flags & (VM_LOCKED | VM_SPECIAL | VM_SEQ_READ | VM_RAND_READ))
3177                 return true;
3178
3179         if (vma == get_gate_vma(vma->vm_mm))
3180                 return true;
3181
3182         if (vma_is_anonymous(vma))
3183                 return !walk->can_swap;
3184
3185         if (WARN_ON_ONCE(!vma->vm_file || !vma->vm_file->f_mapping))
3186                 return true;
3187
3188         mapping = vma->vm_file->f_mapping;
3189         if (mapping_unevictable(mapping))
3190                 return true;
3191
3192         if (shmem_mapping(mapping))
3193                 return !walk->can_swap;
3194
3195         /* to exclude special mappings like dax, etc. */
3196         return !mapping->a_ops->readpage;
3197 }
3198
3199 /*
3200  * Some userspace memory allocators map many single-page VMAs. Instead of
3201  * returning to the PGD table for each such VMA, finish an entire PMD
3202  * table to reduce zigzags and improve cache performance.
3203  */
3204 static bool get_next_vma(unsigned long mask, unsigned long size, struct mm_walk *args,
3205                          unsigned long *vm_start, unsigned long *vm_end)
3206 {
3207         unsigned long start = round_up(*vm_end, size);
3208         unsigned long end = (start | ~mask) + 1;
3209
3210         VM_WARN_ON_ONCE(mask & size);
3211         VM_WARN_ON_ONCE((start & mask) != (*vm_start & mask));
3212
3213         while (args->vma) {
3214                 if (start >= args->vma->vm_end) {
3215                         args->vma = args->vma->vm_next;
3216                         continue;
3217                 }
3218
3219                 if (end && end <= args->vma->vm_start)
3220                         return false;
3221
3222                 if (should_skip_vma(args->vma->vm_start, args->vma->vm_end, args)) {
3223                         args->vma = args->vma->vm_next;
3224                         continue;
3225                 }
3226
3227                 *vm_start = max(start, args->vma->vm_start);
3228                 *vm_end = min(end - 1, args->vma->vm_end - 1) + 1;
3229
3230                 return true;
3231         }
3232
3233         return false;
3234 }
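
/*
 * Worked example (illustrative, assuming 4KB pages and 2MB PMD ranges): for
 * the walk_pte_range() caller, get_next_vma(PMD_MASK, PAGE_SIZE, ...) resumes
 * at start = round_up(*vm_end, PAGE_SIZE) and stops at the end of the current
 * PMD range, end = (start | ~PMD_MASK) + 1; e.g., *vm_end == 0x2003000 gives
 * start == 0x2003000 and end == 0x2200000.
 */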
3235
3236 static unsigned long get_pte_pfn(pte_t pte, struct vm_area_struct *vma, unsigned long addr)
3237 {
3238         unsigned long pfn = pte_pfn(pte);
3239
3240         VM_WARN_ON_ONCE(addr < vma->vm_start || addr >= vma->vm_end);
3241
3242         if (!pte_present(pte) || is_zero_pfn(pfn))
3243                 return -1;
3244
3245         if (WARN_ON_ONCE(pte_devmap(pte) || pte_special(pte)))
3246                 return -1;
3247
3248         if (WARN_ON_ONCE(!pfn_valid(pfn)))
3249                 return -1;
3250
3251         return pfn;
3252 }
3253
3254 #if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_ARCH_HAS_NONLEAF_PMD_YOUNG)
3255 static unsigned long get_pmd_pfn(pmd_t pmd, struct vm_area_struct *vma, unsigned long addr)
3256 {
3257         unsigned long pfn = pmd_pfn(pmd);
3258
3259         VM_WARN_ON_ONCE(addr < vma->vm_start || addr >= vma->vm_end);
3260
3261         if (!pmd_present(pmd) || is_huge_zero_pmd(pmd))
3262                 return -1;
3263
3264         if (WARN_ON_ONCE(pmd_devmap(pmd)))
3265                 return -1;
3266
3267         if (WARN_ON_ONCE(!pfn_valid(pfn)))
3268                 return -1;
3269
3270         return pfn;
3271 }
3272 #endif
3273
3274 static struct page *get_pfn_page(unsigned long pfn, struct mem_cgroup *memcg,
3275                                  struct pglist_data *pgdat, bool can_swap)
3276 {
3277         struct page *page;
3278
3279         /* try to avoid unnecessary memory loads */
3280         if (pfn < pgdat->node_start_pfn || pfn >= pgdat_end_pfn(pgdat))
3281                 return NULL;
3282
3283         page = compound_head(pfn_to_page(pfn));
3284         if (page_to_nid(page) != pgdat->node_id)
3285                 return NULL;
3286
3287         if (page_memcg_rcu(page) != memcg)
3288                 return NULL;
3289
3290         /* file VMAs can contain anon pages from COW */
3291         if (!page_is_file_cache(page) && !can_swap)
3292                 return NULL;
3293
3294         return page;
3295 }
3296
3297 static bool suitable_to_scan(int total, int young)
3298 {
3299         int n = clamp_t(int, cache_line_size() / sizeof(pte_t), 2, 8);
3300
3301         /* suitable if the average number of young PTEs per cacheline is >=1 */
3302         return young * n >= total;
3303 }
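
/*
 * Worked example (illustrative, assuming 64-byte cachelines and 8-byte PTEs):
 * n = 8, so a fully scanned PTE table (total = 512 entries) is suitable if at
 * least young = 64 of them were young, i.e., one young PTE per cacheline of
 * PTEs on average.
 */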
3304
3305 static bool walk_pte_range(pmd_t *pmd, unsigned long start, unsigned long end,
3306                            struct mm_walk *args)
3307 {
3308         int i;
3309         pte_t *pte;
3310         spinlock_t *ptl;
3311         unsigned long addr;
3312         int total = 0;
3313         int young = 0;
3314         struct lru_gen_mm_walk *walk = args->private;
3315         struct mem_cgroup *memcg = lruvec_memcg(walk->lruvec);
3316         struct pglist_data *pgdat = lruvec_pgdat(walk->lruvec);
3317         int old_gen, new_gen = lru_gen_from_seq(walk->max_seq);
3318
3319         VM_WARN_ON_ONCE(pmd_leaf(*pmd));
3320
3321         ptl = pte_lockptr(args->mm, pmd);
3322         if (!spin_trylock(ptl))
3323                 return false;
3324
3325         arch_enter_lazy_mmu_mode();
3326
3327         pte = pte_offset_map(pmd, start & PMD_MASK);
3328 restart:
3329         for (i = pte_index(start), addr = start; addr != end; i++, addr += PAGE_SIZE) {
3330                 unsigned long pfn;
3331                 struct page *page;
3332
3333                 total++;
3334                 walk->mm_stats[MM_LEAF_TOTAL]++;
3335
3336                 pfn = get_pte_pfn(pte[i], args->vma, addr);
3337                 if (pfn == -1)
3338                         continue;
3339
3340                 if (!pte_young(pte[i])) {
3341                         walk->mm_stats[MM_LEAF_OLD]++;
3342                         continue;
3343                 }
3344
3345                 page = get_pfn_page(pfn, memcg, pgdat, walk->can_swap);
3346                 if (!page)
3347                         continue;
3348
3349                 if (!ptep_test_and_clear_young(args->vma, addr, pte + i))
3350                         VM_WARN_ON_ONCE(true);
3351
3352                 young++;
3353                 walk->mm_stats[MM_LEAF_YOUNG]++;
3354
3355                 if (pte_dirty(pte[i]) && !PageDirty(page) &&
3356                     !(PageAnon(page) && PageSwapBacked(page) &&
3357                       !PageSwapCache(page)))
3358                         set_page_dirty(page);
3359
3360                 old_gen = page_update_gen(page, new_gen);
3361                 if (old_gen >= 0 && old_gen != new_gen)
3362                         update_batch_size(walk, page, old_gen, new_gen);
3363         }
3364
3365         if (i < PTRS_PER_PTE && get_next_vma(PMD_MASK, PAGE_SIZE, args, &start, &end))
3366                 goto restart;
3367
3368         pte_unmap(pte);
3369
3370         arch_leave_lazy_mmu_mode();
3371         spin_unlock(ptl);
3372
3373         return suitable_to_scan(total, young);
3374 }
3375
3376 #if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_ARCH_HAS_NONLEAF_PMD_YOUNG)
3377 static void walk_pmd_range_locked(pud_t *pud, unsigned long next, struct vm_area_struct *vma,
3378                                   struct mm_walk *args, unsigned long *bitmap, unsigned long *start)
3379 {
3380         int i;
3381         pmd_t *pmd;
3382         spinlock_t *ptl;
3383         struct lru_gen_mm_walk *walk = args->private;
3384         struct mem_cgroup *memcg = lruvec_memcg(walk->lruvec);
3385         struct pglist_data *pgdat = lruvec_pgdat(walk->lruvec);
3386         int old_gen, new_gen = lru_gen_from_seq(walk->max_seq);
3387
3388         VM_WARN_ON_ONCE(pud_leaf(*pud));
3389
3390         /* try to batch at most 1+MIN_LRU_BATCH+1 entries */
3391         if (*start == -1) {
3392                 *start = next;
3393                 return;
3394         }
3395
3396         i = next == -1 ? 0 : pmd_index(next) - pmd_index(*start);
3397         if (i && i <= MIN_LRU_BATCH) {
3398                 __set_bit(i - 1, bitmap);
3399                 return;
3400         }
3401
3402         pmd = pmd_offset(pud, *start);
3403
3404         ptl = pmd_lockptr(args->mm, pmd);
3405         if (!spin_trylock(ptl))
3406                 goto done;
3407
3408         arch_enter_lazy_mmu_mode();
3409
3410         do {
3411                 unsigned long pfn;
3412                 struct page *page;
3413                 unsigned long addr = i ? (*start & PMD_MASK) + i * PMD_SIZE : *start;
3414
3415                 pfn = get_pmd_pfn(pmd[i], vma, addr);
3416                 if (pfn == -1)
3417                         goto next;
3418
3419                 if (!pmd_trans_huge(pmd[i])) {
3420                         if (IS_ENABLED(CONFIG_ARCH_HAS_NONLEAF_PMD_YOUNG) &&
3421                             get_cap(LRU_GEN_NONLEAF_YOUNG))
3422                                 pmdp_test_and_clear_young(vma, addr, pmd + i);
3423                         goto next;
3424                 }
3425
3426                 page = get_pfn_page(pfn, memcg, pgdat, walk->can_swap);
3427                 if (!page)
3428                         goto next;
3429
3430                 if (!pmdp_test_and_clear_young(vma, addr, pmd + i))
3431                         goto next;
3432
3433                 walk->mm_stats[MM_LEAF_YOUNG]++;
3434
3435                 if (pmd_dirty(pmd[i]) && !PageDirty(page) &&
3436                     !(PageAnon(page) && PageSwapBacked(page) &&
3437                       !PageSwapCache(page)))
3438                         set_page_dirty(page);
3439
3440                 old_gen = page_update_gen(page, new_gen);
3441                 if (old_gen >= 0 && old_gen != new_gen)
3442                         update_batch_size(walk, page, old_gen, new_gen);
3443 next:
3444                 i = i > MIN_LRU_BATCH ? 0 : find_next_bit(bitmap, MIN_LRU_BATCH, i) + 1;
3445         } while (i <= MIN_LRU_BATCH);
3446
3447         arch_leave_lazy_mmu_mode();
3448         spin_unlock(ptl);
3449 done:
3450         *start = -1;
3451         bitmap_zero(bitmap, MIN_LRU_BATCH);
3452 }
3453 #else
3454 static void walk_pmd_range_locked(pud_t *pud, unsigned long next, struct vm_area_struct *vma,
3455                                   struct mm_walk *args, unsigned long *bitmap, unsigned long *start)
3456 {
3457 }
3458 #endif
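
/*
 * Usage note: walk_pmd_range() below uses walk_pmd_range_locked() as a small
 * accumulator. Each interesting PMD entry is recorded in the bitmap as an
 * offset from *start, and the batch is processed under the PMD lock roughly
 * when an entry falls outside the batch window or on the final call with
 * next == -1:
 *
 *	walk_pmd_range_locked(pud, addr, vma, args, bitmap, &pos);
 *	...
 *	walk_pmd_range_locked(pud, -1, vma, args, bitmap, &pos);
 */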
3459
3460 static void walk_pmd_range(pud_t *pud, unsigned long start, unsigned long end,
3461                            struct mm_walk *args)
3462 {
3463         int i;
3464         pmd_t *pmd;
3465         unsigned long next;
3466         unsigned long addr;
3467         struct vm_area_struct *vma;
3468         unsigned long pos = -1;
3469         struct lru_gen_mm_walk *walk = args->private;
3470         unsigned long bitmap[BITS_TO_LONGS(MIN_LRU_BATCH)] = {};
3471
3472         VM_WARN_ON_ONCE(pud_leaf(*pud));
3473
3474         /*
3475          * Finish an entire PMD in two passes: the first only reaches to PTE
3476          * tables to avoid taking the PMD lock; the second, if necessary, takes
3477          * the PMD lock to clear the accessed bit in PMD entries.
3478          */
3479         pmd = pmd_offset(pud, start & PUD_MASK);
3480 restart:
3481         /* walk_pte_range() may call get_next_vma() */
3482         vma = args->vma;
3483         for (i = pmd_index(start), addr = start; addr != end; i++, addr = next) {
3484                 pmd_t val = pmd_read_atomic(pmd + i);
3485
3486                 /* for pmd_read_atomic() */
3487                 barrier();
3488
3489                 next = pmd_addr_end(addr, end);
3490
3491                 if (!pmd_present(val) || is_huge_zero_pmd(val)) {
3492                         walk->mm_stats[MM_LEAF_TOTAL]++;
3493                         continue;
3494                 }
3495
3496 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
3497                 if (pmd_trans_huge(val)) {
3498                         unsigned long pfn = pmd_pfn(val);
3499                         struct pglist_data *pgdat = lruvec_pgdat(walk->lruvec);
3500
3501                         walk->mm_stats[MM_LEAF_TOTAL]++;
3502
3503                         if (!pmd_young(val)) {
3504                                 walk->mm_stats[MM_LEAF_OLD]++;
3505                                 continue;
3506                         }
3507
3508                         /* try to avoid unnecessary memory loads */
3509                         if (pfn < pgdat->node_start_pfn || pfn >= pgdat_end_pfn(pgdat))
3510                                 continue;
3511
3512                         walk_pmd_range_locked(pud, addr, vma, args, bitmap, &pos);
3513                         continue;
3514                 }
3515 #endif
3516                 walk->mm_stats[MM_NONLEAF_TOTAL]++;
3517
3518 #ifdef CONFIG_ARCH_HAS_NONLEAF_PMD_YOUNG
3519                 if (get_cap(LRU_GEN_NONLEAF_YOUNG)) {
3520                         if (!pmd_young(val))
3521                                 continue;
3522
3523                         walk_pmd_range_locked(pud, addr, vma, args, bitmap, &pos);
3524                 }
3525 #endif
3526                 if (!walk->force_scan && !test_bloom_filter(walk->lruvec, walk->max_seq, pmd + i))
3527                         continue;
3528
3529                 walk->mm_stats[MM_NONLEAF_FOUND]++;
3530
3531                 if (!walk_pte_range(&val, addr, next, args))
3532                         continue;
3533
3534                 walk->mm_stats[MM_NONLEAF_ADDED]++;
3535
3536                 /* carry over to the next generation */
3537                 update_bloom_filter(walk->lruvec, walk->max_seq + 1, pmd + i);
3538         }
3539
3540         walk_pmd_range_locked(pud, -1, vma, args, bitmap, &pos);
3541
3542         if (i < PTRS_PER_PMD && get_next_vma(PUD_MASK, PMD_SIZE, args, &start, &end))
3543                 goto restart;
3544 }
3545
3546 static int walk_pud_range(p4d_t *p4d, unsigned long start, unsigned long end,
3547                           struct mm_walk *args)
3548 {
3549         int i;
3550         pud_t *pud;
3551         unsigned long addr;
3552         unsigned long next;
3553         struct lru_gen_mm_walk *walk = args->private;
3554
3555         VM_WARN_ON_ONCE(p4d_leaf(*p4d));
3556
3557         pud = pud_offset(p4d, start & P4D_MASK);
3558 restart:
3559         for (i = pud_index(start), addr = start; addr != end; i++, addr = next) {
3560                 pud_t val = READ_ONCE(pud[i]);
3561
3562                 next = pud_addr_end(addr, end);
3563
3564                 if (!pud_present(val) || WARN_ON_ONCE(pud_leaf(val)))
3565                         continue;
3566
3567                 walk_pmd_range(&val, addr, next, args);
3568
3569                 if (need_resched() || walk->batched >= MAX_LRU_BATCH) {
3570                         end = (addr | ~PUD_MASK) + 1;
3571                         goto done;
3572                 }
3573         }
3574
3575         if (i < PTRS_PER_PUD && get_next_vma(P4D_MASK, PUD_SIZE, args, &start, &end))
3576                 goto restart;
3577
3578         end = round_up(end, P4D_SIZE);
3579 done:
3580         if (!end || !args->vma)
3581                 return 1;
3582
3583         walk->next_addr = max(end, args->vma->vm_start);
3584
3585         return -EAGAIN;
3586 }
3587
3588 static void walk_mm(struct lruvec *lruvec, struct mm_struct *mm, struct lru_gen_mm_walk *walk)
3589 {
3590         static const struct mm_walk_ops mm_walk_ops = {
3591                 .test_walk = should_skip_vma,
3592                 .p4d_entry = walk_pud_range,
3593         };
3594
3595         int err;
3596         struct mem_cgroup *memcg = lruvec_memcg(lruvec);
3597
3598         walk->next_addr = FIRST_USER_ADDRESS;
3599
3600         do {
3601                 DEFINE_MAX_SEQ(lruvec);
3602
3603                 err = -EBUSY;
3604
3605                 /* another thread might have called inc_max_seq() */
3606                 if (walk->max_seq != max_seq)
3607                         break;
3608
3609                 /* page_update_gen() requires stable page_memcg() */
3610                 if (!mem_cgroup_trylock_pages(memcg))
3611                         break;
3612
3613                 /* the caller might be holding the lock for write */
3614                 if (down_read_trylock(&mm->mmap_sem)) {
3615                         err = walk_page_range(mm, walk->next_addr, ULONG_MAX, &mm_walk_ops, walk);
3616
3617                         up_read(&mm->mmap_sem);
3618                 }
3619
3620                 mem_cgroup_unlock_pages();
3621
3622                 if (walk->batched) {
3623                         spin_lock_irq(&lruvec_pgdat(lruvec)->lru_lock);
3624                         reset_batch_size(lruvec, walk);
3625                         spin_unlock_irq(&lruvec_pgdat(lruvec)->lru_lock);
3626                 }
3627
3628                 cond_resched();
3629         } while (err == -EAGAIN);
3630 }
3631
3632 static struct lru_gen_mm_walk *set_mm_walk(struct pglist_data *pgdat)
3633 {
3634         struct lru_gen_mm_walk *walk = current->reclaim_state->mm_walk;
3635
3636         if (pgdat && current_is_kswapd()) {
3637                 VM_WARN_ON_ONCE(walk);
3638
3639                 walk = &pgdat->mm_walk;
3640         } else if (!pgdat && !walk) {
3641                 VM_WARN_ON_ONCE(current_is_kswapd());
3642
3643                 walk = kzalloc(sizeof(*walk), __GFP_HIGH | __GFP_NOMEMALLOC | __GFP_NOWARN);
3644         }
3645
3646         current->reclaim_state->mm_walk = walk;
3647
3648         return walk;
3649 }
3650
3651 static void clear_mm_walk(void)
3652 {
3653         struct lru_gen_mm_walk *walk = current->reclaim_state->mm_walk;
3654
3655         VM_WARN_ON_ONCE(walk && memchr_inv(walk->nr_pages, 0, sizeof(walk->nr_pages)));
3656         VM_WARN_ON_ONCE(walk && memchr_inv(walk->mm_stats, 0, sizeof(walk->mm_stats)));
3657
3658         current->reclaim_state->mm_walk = NULL;
3659
3660         if (!current_is_kswapd())
3661                 kfree(walk);
3662 }
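
/*
 * Usage note: kswapd reuses the preallocated pgdat->mm_walk, whereas direct
 * reclaim allocates a transient walk in set_mm_walk(NULL) and frees it in
 * clear_mm_walk(). Either way the pointer is stashed in
 * current->reclaim_state->mm_walk so that lru_gen_look_around() can batch
 * generation updates instead of taking the LRU lock under the PTL.
 */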
3663
3664 static bool inc_min_seq(struct lruvec *lruvec, int type, bool can_swap)
3665 {
3666         int zone;
3667         int remaining = MAX_LRU_BATCH;
3668         struct lru_gen_struct *lrugen = &lruvec->lrugen;
3669         int new_gen, old_gen = lru_gen_from_seq(lrugen->min_seq[type]);
3670
3671         if (type == LRU_GEN_ANON && !can_swap)
3672                 goto done;
3673
3674         /* prevent cold/hot inversion if force_scan is true */
3675         for (zone = 0; zone < MAX_NR_ZONES; zone++) {
3676                 struct list_head *head = &lrugen->lists[old_gen][type][zone];
3677
3678                 while (!list_empty(head)) {
3679                         struct page *page = lru_to_page(head);
3680
3681                         VM_WARN_ON_ONCE_PAGE(PageUnevictable(page), page);
3682                         VM_WARN_ON_ONCE_PAGE(PageActive(page), page);
3683                         VM_WARN_ON_ONCE_PAGE(page_is_file_cache(page) != type, page);
3684                         VM_WARN_ON_ONCE_PAGE(page_zonenum(page) != zone, page);
3685
3686                         new_gen = page_inc_gen(lruvec, page, false);
3687                         list_move_tail(&page->lru, &lrugen->lists[new_gen][type][zone]);
3688
3689                         if (!--remaining)
3690                                 return false;
3691                 }
3692         }
3693 done:
3694         reset_ctrl_pos(lruvec, type, true);
3695         WRITE_ONCE(lrugen->min_seq[type], lrugen->min_seq[type] + 1);
3696
3697         return true;
3698 }
3699
3700 static bool try_to_inc_min_seq(struct lruvec *lruvec, bool can_swap)
3701 {
3702         int gen, type, zone;
3703         bool success = false;
3704         struct lru_gen_struct *lrugen = &lruvec->lrugen;
3705         DEFINE_MIN_SEQ(lruvec);
3706
3707         VM_WARN_ON_ONCE(!seq_is_valid(lruvec));
3708
3709         /* find the oldest populated generation */
3710         for (type = !can_swap; type < ANON_AND_FILE; type++) {
3711                 while (min_seq[type] + MIN_NR_GENS <= lrugen->max_seq) {
3712                         gen = lru_gen_from_seq(min_seq[type]);
3713
3714                         for (zone = 0; zone < MAX_NR_ZONES; zone++) {
3715                                 if (!list_empty(&lrugen->lists[gen][type][zone]))
3716                                         goto next;
3717                         }
3718
3719                         min_seq[type]++;
3720                 }
3721 next:
3722                 ;
3723         }
3724
3725         /* see the comment on lru_gen_struct */
3726         if (can_swap) {
3727                 min_seq[LRU_GEN_ANON] = min(min_seq[LRU_GEN_ANON], min_seq[LRU_GEN_FILE]);
3728                 min_seq[LRU_GEN_FILE] = max(min_seq[LRU_GEN_ANON], lrugen->min_seq[LRU_GEN_FILE]);
3729         }
3730
3731         for (type = !can_swap; type < ANON_AND_FILE; type++) {
3732                 if (min_seq[type] == lrugen->min_seq[type])
3733                         continue;
3734
3735                 reset_ctrl_pos(lruvec, type, true);
3736                 WRITE_ONCE(lrugen->min_seq[type], min_seq[type]);
3737                 success = true;
3738         }
3739
3740         return success;
3741 }
3742
3743 static void inc_max_seq(struct lruvec *lruvec, bool can_swap, bool force_scan)
3744 {
3745         int prev, next;
3746         int type, zone;
3747         struct lru_gen_struct *lrugen = &lruvec->lrugen;
3748
3749         spin_lock_irq(&lruvec_pgdat(lruvec)->lru_lock);
3750
3751         VM_WARN_ON_ONCE(!seq_is_valid(lruvec));
3752
3753         for (type = ANON_AND_FILE - 1; type >= 0; type--) {
3754                 if (get_nr_gens(lruvec, type) != MAX_NR_GENS)
3755                         continue;
3756
3757                 VM_WARN_ON_ONCE(!force_scan && (type == LRU_GEN_FILE || can_swap));
3758
3759                 while (!inc_min_seq(lruvec, type, can_swap)) {
3760                         spin_unlock_irq(&lruvec_pgdat(lruvec)->lru_lock);
3761                         cond_resched();
3762                         spin_lock_irq(&lruvec_pgdat(lruvec)->lru_lock);
3763                 }
3764         }
3765
3766         /*
3767          * Update the active/inactive LRU sizes for compatibility. Both sides of
3768          * the current max_seq need to be covered, since max_seq+1 can overlap
3769          * with min_seq[LRU_GEN_ANON] if swapping is constrained. And if they do
3770          * overlap, cold/hot inversion happens.
3771          */
3772         prev = lru_gen_from_seq(lrugen->max_seq - 1);
3773         next = lru_gen_from_seq(lrugen->max_seq + 1);
3774
3775         for (type = 0; type < ANON_AND_FILE; type++) {
3776                 for (zone = 0; zone < MAX_NR_ZONES; zone++) {
3777                         enum lru_list lru = type * LRU_INACTIVE_FILE;
3778                         long delta = lrugen->nr_pages[prev][type][zone] -
3779                                      lrugen->nr_pages[next][type][zone];
3780
3781                         if (!delta)
3782                                 continue;
3783
3784                         __update_lru_size(lruvec, lru, zone, delta);
3785                         __update_lru_size(lruvec, lru + LRU_ACTIVE, zone, -delta);
3786                 }
3787         }
3788
3789         for (type = 0; type < ANON_AND_FILE; type++)
3790                 reset_ctrl_pos(lruvec, type, false);
3791
3792         WRITE_ONCE(lrugen->timestamps[next], jiffies);
3793         /* make sure preceding modifications appear */
3794         smp_store_release(&lrugen->max_seq, lrugen->max_seq + 1);
3795
3796         spin_unlock_irq(&lruvec_pgdat(lruvec)->lru_lock);
3797 }
3798
3799 static bool try_to_inc_max_seq(struct lruvec *lruvec, unsigned long max_seq,
3800                                struct scan_control *sc, bool can_swap, bool force_scan)
3801 {
3802         bool success;
3803         struct lru_gen_mm_walk *walk;
3804         struct mm_struct *mm = NULL;
3805         struct lru_gen_struct *lrugen = &lruvec->lrugen;
3806
3807         VM_WARN_ON_ONCE(max_seq > READ_ONCE(lrugen->max_seq));
3808
3809         /* see the comment in iterate_mm_list() */
3810         if (max_seq <= READ_ONCE(lruvec->mm_state.seq)) {
3811                 success = false;
3812                 goto done;
3813         }
3814
3815         /*
3816          * If the hardware doesn't automatically set the accessed bit, fall back
3817          * to lru_gen_look_around(), which only clears the accessed bit in a
3818          * handful of PTEs. Spreading the work out over a period of time is
3819          * usually less efficient, but it avoids bursty page faults.
3820          */
3821         if (!force_scan && !(arch_has_hw_pte_young() && get_cap(LRU_GEN_MM_WALK))) {
3822                 success = iterate_mm_list_nowalk(lruvec, max_seq);
3823                 goto done;
3824         }
3825
3826         walk = set_mm_walk(NULL);
3827         if (!walk) {
3828                 success = iterate_mm_list_nowalk(lruvec, max_seq);
3829                 goto done;
3830         }
3831
3832         walk->lruvec = lruvec;
3833         walk->max_seq = max_seq;
3834         walk->can_swap = can_swap;
3835         walk->force_scan = force_scan;
3836
3837         do {
3838                 success = iterate_mm_list(lruvec, walk, &mm);
3839                 if (mm)
3840                         walk_mm(lruvec, mm, walk);
3841         } while (mm);
3842 done:
3843         if (success)
3844                 inc_max_seq(lruvec, can_swap, force_scan);
3845
3846         return success;
3847 }
3848
3849 static bool should_run_aging(struct lruvec *lruvec, unsigned long max_seq, unsigned long *min_seq,
3850                              struct scan_control *sc, bool can_swap, unsigned long *nr_to_scan)
3851 {
3852         int gen, type, zone;
3853         unsigned long old = 0;
3854         unsigned long young = 0;
3855         unsigned long total = 0;
3856         struct lru_gen_struct *lrugen = &lruvec->lrugen;
3857         struct mem_cgroup *memcg = lruvec_memcg(lruvec);
3858
3859         for (type = !can_swap; type < ANON_AND_FILE; type++) {
3860                 unsigned long seq;
3861
3862                 for (seq = min_seq[type]; seq <= max_seq; seq++) {
3863                         unsigned long size = 0;
3864
3865                         gen = lru_gen_from_seq(seq);
3866
3867                         for (zone = 0; zone < MAX_NR_ZONES; zone++)
3868                                 size += max(READ_ONCE(lrugen->nr_pages[gen][type][zone]), 0L);
3869
3870                         total += size;
3871                         if (seq == max_seq)
3872                                 young += size;
3873                         else if (seq + MIN_NR_GENS == max_seq)
3874                                 old += size;
3875                 }
3876         }
3877
3878         /* try to scrape all its memory if this memcg was deleted */
3879         *nr_to_scan = mem_cgroup_online(memcg) ? (total >> sc->priority) : total;
3880
3881         /*
3882          * The aging tries to be lazy to reduce the overhead, while the eviction
3883          * stalls when the number of generations reaches MIN_NR_GENS. Hence, the
3884          * ideal number of generations is MIN_NR_GENS+1.
3885          */
3886         if (min_seq[!can_swap] + MIN_NR_GENS > max_seq)
3887                 return true;
3888         if (min_seq[!can_swap] + MIN_NR_GENS < max_seq)
3889                 return false;
3890
3891         /*
3892          * It's also ideal to spread pages out evenly, i.e., 1/(MIN_NR_GENS+1)
3893          * of the total number of pages for each generation. A reasonable range
3894          * for this average portion is [1/MIN_NR_GENS, 1/(MIN_NR_GENS+2)]. The
3895          * aging cares about the upper bound of hot pages, while the eviction
3896          * cares about the lower bound of cold pages.
3897          */
3898         if (young * MIN_NR_GENS > total)
3899                 return true;
3900         if (old * (MIN_NR_GENS + 2) < total)
3901                 return true;
3902
3903         return false;
3904 }
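
/*
 * Worked example (illustrative, assuming MIN_NR_GENS is 2): with fewer than
 * three generations the aging must run; with more than three the eviction has
 * enough cold pages and the aging stays lazy; with exactly three it runs only
 * if the youngest generation holds more than 1/2 of the pages (too many hot
 * pages) or the oldest holds less than 1/4 (too few cold pages).
 */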
3905
3906 static bool age_lruvec(struct lruvec *lruvec, struct scan_control *sc, unsigned long min_ttl)
3907 {
3908         bool need_aging;
3909         unsigned long nr_to_scan;
3910         int swappiness = get_swappiness(lruvec, sc);
3911         struct mem_cgroup *memcg = lruvec_memcg(lruvec);
3912         DEFINE_MAX_SEQ(lruvec);
3913         DEFINE_MIN_SEQ(lruvec);
3914
3915         VM_WARN_ON_ONCE(sc->memcg_low_reclaim);
3916
3917         mem_cgroup_calculate_protection(NULL, memcg);
3918
3919         if (mem_cgroup_below_min(memcg))
3920                 return false;
3921
3922         need_aging = should_run_aging(lruvec, max_seq, min_seq, sc, swappiness, &nr_to_scan);
3923
3924         if (min_ttl) {
3925                 int gen = lru_gen_from_seq(min_seq[LRU_GEN_FILE]);
3926                 unsigned long birth = READ_ONCE(lruvec->lrugen.timestamps[gen]);
3927
3928                 if (time_is_after_jiffies(birth + min_ttl))
3929                         return false;
3930
3931                 /* the size is likely too small to be helpful */
3932                 if (!nr_to_scan && sc->priority != DEF_PRIORITY)
3933                         return false;
3934         }
3935
3936         if (need_aging)
3937                 try_to_inc_max_seq(lruvec, max_seq, sc, swappiness, false);
3938
3939         return true;
3940 }
3941
3942 /* to protect the working set of the last N jiffies */
3943 static unsigned long lru_gen_min_ttl __read_mostly;
3944
3945 static void lru_gen_age_node(struct pglist_data *pgdat, struct scan_control *sc)
3946 {
3947         struct mem_cgroup *memcg;
3948         bool success = false;
3949         unsigned long min_ttl = READ_ONCE(lru_gen_min_ttl);
3950
3951         VM_WARN_ON_ONCE(!current_is_kswapd());
3952
3953         sc->last_reclaimed = sc->nr_reclaimed;
3954
3955         /*
3956          * To reduce the chance of going into the aging path, which can be
3957          * costly, optimistically skip it if the flag below was cleared in the
3958          * eviction path. This improves the overall performance when multiple
3959          * memcgs are available.
3960          */
3961         if (!sc->memcgs_need_aging) {
3962                 sc->memcgs_need_aging = true;
3963                 return;
3964         }
3965
3966         set_mm_walk(pgdat);
3967
3968         memcg = mem_cgroup_iter(NULL, NULL, NULL);
3969         do {
3970                 struct lruvec *lruvec = mem_cgroup_lruvec(memcg, pgdat);
3971
3972                 if (age_lruvec(lruvec, sc, min_ttl))
3973                         success = true;
3974
3975                 cond_resched();
3976         } while ((memcg = mem_cgroup_iter(NULL, memcg, NULL)));
3977
3978         clear_mm_walk();
3979
3980         /* check the order to exclude compaction-induced reclaim */
3981         if (success || !min_ttl || sc->order)
3982                 return;
3983
3984         /*
3985          * The main goal is to OOM kill if every generation from all memcgs is
3986          * younger than min_ttl. However, another possibility is all memcgs are
3987          * either below min or empty.
3988          */
3989         if (mutex_trylock(&oom_lock)) {
3990                 struct oom_control oc = {
3991                         .gfp_mask = sc->gfp_mask,
3992                 };
3993
3994                 out_of_memory(&oc);
3995
3996                 mutex_unlock(&oom_lock);
3997         }
3998 }
3999
4000 /*
4001  * This function exploits spatial locality when shrink_page_list() walks the
4002  * rmap. It scans the adjacent PTEs of a young PTE and promotes hot pages. If
4003  * the scan was done cacheline efficiently, it adds the PMD entry pointing to
4004  * the PTE table to the Bloom filter. This forms a feedback loop between the
4005  * eviction and the aging.
4006  */
4007 void lru_gen_look_around(struct page_vma_mapped_walk *pvmw)
4008 {
4009         int i;
4010         pte_t *pte;
4011         unsigned long start;
4012         unsigned long end;
4013         unsigned long addr;
4014         struct lru_gen_mm_walk *walk;
4015         int young = 0;
4016         unsigned long bitmap[BITS_TO_LONGS(MIN_LRU_BATCH)] = {};
4017         struct page *page = pvmw->page;
4018         struct mem_cgroup *memcg = page_memcg(page);
4019         struct pglist_data *pgdat = page_pgdat(page);
4020         struct lruvec *lruvec = mem_cgroup_lruvec(memcg, pgdat);
4021         DEFINE_MAX_SEQ(lruvec);
4022         int old_gen, new_gen = lru_gen_from_seq(max_seq);
4023
4024         lockdep_assert_held(pvmw->ptl);
4025         VM_WARN_ON_ONCE_PAGE(PageLRU(page), page);
4026
4027         if (spin_is_contended(pvmw->ptl))
4028                 return;
4029
4030         /* avoid taking the LRU lock under the PTL when possible */
4031         walk = current->reclaim_state ? current->reclaim_state->mm_walk : NULL;
4032
4033         start = max(pvmw->address & PMD_MASK, pvmw->vma->vm_start);
4034         end = min(pvmw->address | ~PMD_MASK, pvmw->vma->vm_end - 1) + 1;
4035
4036         if (end - start > MIN_LRU_BATCH * PAGE_SIZE) {
4037                 if (pvmw->address - start < MIN_LRU_BATCH * PAGE_SIZE / 2)
4038                         end = start + MIN_LRU_BATCH * PAGE_SIZE;
4039                 else if (end - pvmw->address < MIN_LRU_BATCH * PAGE_SIZE / 2)
4040                         start = end - MIN_LRU_BATCH * PAGE_SIZE;
4041                 else {
4042                         start = pvmw->address - MIN_LRU_BATCH * PAGE_SIZE / 2;
4043                         end = pvmw->address + MIN_LRU_BATCH * PAGE_SIZE / 2;
4044                 }
4045         }
4046
4047         pte = pvmw->pte - (pvmw->address - start) / PAGE_SIZE;
4048
4049         rcu_read_lock();
4050         arch_enter_lazy_mmu_mode();
4051
4052         for (i = 0, addr = start; addr != end; i++, addr += PAGE_SIZE) {
4053                 unsigned long pfn;
4054
4055                 pfn = get_pte_pfn(pte[i], pvmw->vma, addr);
4056                 if (pfn == -1)
4057                         continue;
4058
4059                 if (!pte_young(pte[i]))
4060                         continue;
4061
4062                 page = get_pfn_page(pfn, memcg, pgdat, !walk || walk->can_swap);
4063                 if (!page)
4064                         continue;
4065
4066                 if (!ptep_test_and_clear_young(pvmw->vma, addr, pte + i))
4067                         VM_WARN_ON_ONCE(true);
4068
4069                 young++;
4070
4071                 if (pte_dirty(pte[i]) && !PageDirty(page) &&
4072                     !(PageAnon(page) && PageSwapBacked(page) &&
4073                       !PageSwapCache(page)))
4074                         set_page_dirty(page);
4075
4076                 old_gen = page_lru_gen(page);
4077                 if (old_gen < 0)
4078                         SetPageReferenced(page);
4079                 else if (old_gen != new_gen)
4080                         __set_bit(i, bitmap);
4081         }
4082
4083         arch_leave_lazy_mmu_mode();
4084         rcu_read_unlock();
4085
4086         /* feedback from rmap walkers to page table walkers */
4087         if (suitable_to_scan(i, young))
4088                 update_bloom_filter(lruvec, max_seq, pvmw->pmd);
4089
4090         if (!walk && bitmap_weight(bitmap, MIN_LRU_BATCH) < PAGEVEC_SIZE) {
4091                 for_each_set_bit(i, bitmap, MIN_LRU_BATCH) {
4092                         page = pte_page(pte[i]);
4093                         activate_page(page);
4094                 }
4095                 return;
4096         }
4097
4098         /* page_update_gen() requires stable page_memcg() */
4099         if (!mem_cgroup_trylock_pages(memcg))
4100                 return;
4101
4102         if (!walk) {
4103                 spin_lock_irq(&lruvec_pgdat(lruvec)->lru_lock);
4104                 new_gen = lru_gen_from_seq(lruvec->lrugen.max_seq);
4105         }
4106
4107         for_each_set_bit(i, bitmap, MIN_LRU_BATCH) {
4108                 page = compound_head(pte_page(pte[i]));
4109                 if (page_memcg_rcu(page) != memcg)
4110                         continue;
4111
4112                 old_gen = page_update_gen(page, new_gen);
4113                 if (old_gen < 0 || old_gen == new_gen)
4114                         continue;
4115
4116                 if (walk)
4117                         update_batch_size(walk, page, old_gen, new_gen);
4118                 else
4119                         lru_gen_update_size(lruvec, page, old_gen, new_gen);
4120         }
4121
4122         if (!walk)
4123                 spin_unlock_irq(&lruvec_pgdat(lruvec)->lru_lock);
4124
4125         mem_cgroup_unlock_pages();
4126 }
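
/*
 * Worked example (illustrative, assuming 4KB pages and MIN_LRU_BATCH == 64):
 * the look-around window above covers at most 64 PTEs, i.e., 256KB of address
 * space centred on (or pinned next to) the young PTE found by the rmap walk,
 * and it never crosses the PMD range or the VMA containing pvmw->address.
 */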
4127
4128 /******************************************************************************
4129  *                          the eviction
4130  ******************************************************************************/
4131
4132 static bool sort_page(struct lruvec *lruvec, struct page *page, int tier_idx)
4133 {
4134         bool success;
4135         int gen = page_lru_gen(page);
4136         int type = page_is_file_cache(page);
4137         int zone = page_zonenum(page);
4138         int delta = hpage_nr_pages(page);
4139         int refs = page_lru_refs(page);
4140         int tier = lru_tier_from_refs(refs);
4141         struct lru_gen_struct *lrugen = &lruvec->lrugen;
4142
4143         VM_WARN_ON_ONCE_PAGE(gen >= MAX_NR_GENS, page);
4144
4145         /* unevictable */
4146         if (!page_evictable(page)) {
4147                 success = lru_gen_del_page(lruvec, page, true);
4148                 VM_WARN_ON_ONCE_PAGE(!success, page);
4149                 SetPageUnevictable(page);
4150                 add_page_to_lru_list(page, lruvec, LRU_UNEVICTABLE);
4151                 __count_vm_events(UNEVICTABLE_PGCULLED, delta);
4152                 return true;
4153         }
4154
4155         /* dirty lazyfree */
4156         if (type == LRU_GEN_FILE && PageAnon(page) && PageDirty(page)) {
4157                 enum lru_list lru = page_lru_base_type(page);
4158
4159                 success = lru_gen_del_page(lruvec, page, true);
4160                 VM_WARN_ON_ONCE_PAGE(!success, page);
4161                 SetPageSwapBacked(page);
4162                 add_page_to_lru_list_tail(page, lruvec, lru);
4163                 return true;
4164         }
4165
4166         /* promoted */
4167         if (gen != lru_gen_from_seq(lrugen->min_seq[type])) {
4168                 list_move(&page->lru, &lrugen->lists[gen][type][zone]);
4169                 return true;
4170         }
4171
4172         /* protected */
4173         if (tier > tier_idx) {
4174                 int hist = lru_hist_from_seq(lrugen->min_seq[type]);
4175
4176                 gen = page_inc_gen(lruvec, page, false);
4177                 list_move_tail(&page->lru, &lrugen->lists[gen][type][zone]);
4178
4179                 WRITE_ONCE(lrugen->protected[hist][type][tier - 1],
4180                            lrugen->protected[hist][type][tier - 1] + delta);
4181                 __mod_lruvec_state(lruvec, WORKINGSET_ACTIVATE, delta);
4182                 return true;
4183         }
4184
4185         /* waiting for writeback */
4186         if (PageLocked(page) || PageWriteback(page) ||
4187             (type == LRU_GEN_FILE && PageDirty(page))) {
4188                 gen = page_inc_gen(lruvec, page, true);
4189                 list_move(&page->lru, &lrugen->lists[gen][type][zone]);
4190                 return true;
4191         }
4192
4193         return false;
4194 }
4195
4196 static bool isolate_page(struct lruvec *lruvec, struct page *page, struct scan_control *sc)
4197 {
4198         bool success;
4199
4200         /* unmapping inhibited */
4201         if (!sc->may_unmap && page_mapped(page))
4202                 return false;
4203
4204         /* swapping inhibited */
4205         if (!(sc->may_writepage && (sc->gfp_mask & __GFP_IO)) &&
4206             (PageDirty(page) ||
4207              (PageAnon(page) && !PageSwapCache(page))))
4208                 return false;
4209
4210         /* raced with release_pages() */
4211         if (!get_page_unless_zero(page))
4212                 return false;
4213
4214         /* raced with another isolation */
4215         if (!TestClearPageLRU(page)) {
4216                 put_page(page);
4217                 return false;
4218         }
4219
4220         /* see the comment on MAX_NR_TIERS */
4221         if (!PageReferenced(page))
4222                 set_mask_bits(&page->flags, LRU_REFS_MASK | LRU_REFS_FLAGS, 0);
4223
4224         /* for shrink_page_list() */
4225         ClearPageReclaim(page);
4226         ClearPageReferenced(page);
4227
4228         success = lru_gen_del_page(lruvec, page, true);
4229         VM_WARN_ON_ONCE_PAGE(!success, page);
4230
4231         return true;
4232 }
4233
4234 static int scan_pages(struct lruvec *lruvec, struct scan_control *sc,
4235                       int type, int tier, struct list_head *list)
4236 {
4237         int gen, zone;
4238         enum vm_event_item item;
4239         int sorted = 0;
4240         int scanned = 0;
4241         int isolated = 0;
4242         int remaining = MAX_LRU_BATCH;
4243         struct lru_gen_struct *lrugen = &lruvec->lrugen;
4244         struct mem_cgroup *memcg = lruvec_memcg(lruvec);
4245
4246         VM_WARN_ON_ONCE(!list_empty(list));
4247
4248         if (get_nr_gens(lruvec, type) == MIN_NR_GENS)
4249                 return 0;
4250
4251         gen = lru_gen_from_seq(lrugen->min_seq[type]);
4252
4253         for (zone = sc->reclaim_idx; zone >= 0; zone--) {
4254                 LIST_HEAD(moved);
4255                 int skipped = 0;
4256                 struct list_head *head = &lrugen->lists[gen][type][zone];
4257
4258                 while (!list_empty(head)) {
4259                         struct page *page = lru_to_page(head);
4260                         int delta = hpage_nr_pages(page);
4261
4262                         VM_WARN_ON_ONCE_PAGE(PageUnevictable(page), page);
4263                         VM_WARN_ON_ONCE_PAGE(PageActive(page), page);
4264                         VM_WARN_ON_ONCE_PAGE(page_is_file_cache(page) != type, page);
4265                         VM_WARN_ON_ONCE_PAGE(page_zonenum(page) != zone, page);
4266
4267                         scanned += delta;
4268
4269                         if (sort_page(lruvec, page, tier))
4270                                 sorted += delta;
4271                         else if (isolate_page(lruvec, page, sc)) {
4272                                 list_add(&page->lru, list);
4273                                 isolated += delta;
4274                         } else {
4275                                 list_move(&page->lru, &moved);
4276                                 skipped += delta;
4277                         }
4278
4279                         if (!--remaining || max(isolated, skipped) >= MIN_LRU_BATCH)
4280                                 break;
4281                 }
4282
4283                 if (skipped) {
4284                         list_splice(&moved, head);
4285                         __count_zid_vm_events(PGSCAN_SKIP, zone, skipped);
4286                 }
4287
4288                 if (!remaining || isolated >= MIN_LRU_BATCH)
4289                         break;
4290         }
4291
4292         item = current_is_kswapd() ? PGSCAN_KSWAPD : PGSCAN_DIRECT;
4293         if (!cgroup_reclaim(sc)) {
4294                 __count_vm_events(item, isolated);
4295                 __count_vm_events(PGREFILL, sorted);
4296         }
4297         __count_memcg_events(memcg, item, isolated);
4298         __count_memcg_events(memcg, PGREFILL, sorted);
4299
4300         /*
4301          * There might not be eligible pages due to reclaim_idx, may_unmap and
4302          * may_writepage. Check the remaining to prevent livelock if it's not
4303          * making progress.
4304          */
4305         return isolated || !remaining ? scanned : 0;
4306 }
4307
4308 static int get_tier_idx(struct lruvec *lruvec, int type)
4309 {
4310         int tier;
4311         struct ctrl_pos sp, pv;
4312
4313         /*
4314          * To leave a margin for fluctuations, use a larger gain factor (1:2).
4315          * This value is chosen because any other tier would have at least twice
4316          * as many refaults as the first tier.
4317          */
4318         read_ctrl_pos(lruvec, type, 0, 1, &sp);
4319         for (tier = 1; tier < MAX_NR_TIERS; tier++) {
4320                 read_ctrl_pos(lruvec, type, tier, 2, &pv);
4321                 if (!positive_ctrl_err(&sp, &pv))
4322                         break;
4323         }
4324
4325         return tier - 1;
4326 }
4327
4328 static int get_type_to_scan(struct lruvec *lruvec, int swappiness, int *tier_idx)
4329 {
4330         int type, tier;
4331         struct ctrl_pos sp, pv;
4332         int gain[ANON_AND_FILE] = { swappiness, 200 - swappiness };
4333
4334         /*
4335          * Compare the first tier of anon with that of file to determine which
4336          * type to scan. Also need to compare other tiers of the selected type
4337          * with the first tier of the other type to determine the last tier (of
4338          * the selected type) to evict.
4339          */
4340         read_ctrl_pos(lruvec, LRU_GEN_ANON, 0, gain[LRU_GEN_ANON], &sp);
4341         read_ctrl_pos(lruvec, LRU_GEN_FILE, 0, gain[LRU_GEN_FILE], &pv);
4342         type = positive_ctrl_err(&sp, &pv);
4343
4344         read_ctrl_pos(lruvec, !type, 0, gain[!type], &sp);
4345         for (tier = 1; tier < MAX_NR_TIERS; tier++) {
4346                 read_ctrl_pos(lruvec, type, tier, gain[type], &pv);
4347                 if (!positive_ctrl_err(&sp, &pv))
4348                         break;
4349         }
4350
4351         *tier_idx = tier - 1;
4352
4353         return type;
4354 }
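
/*
 * Worked example (illustrative, not part of the original source): with the
 * default swappiness of 60, the weights above are gain[LRU_GEN_ANON] = 60 and
 * gain[LRU_GEN_FILE] = 140; with swappiness 200 they become 200 and 0.
 * Roughly speaking, a lower swappiness therefore biases the comparison of the
 * first tiers towards evicting file pages, and a higher one towards evicting
 * anon pages.
 */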
4355
4356 static int isolate_pages(struct lruvec *lruvec, struct scan_control *sc, int swappiness,
4357                          int *type_scanned, struct list_head *list)
4358 {
4359         int i;
4360         int type;
4361         int scanned;
4362         int tier = -1;
4363         DEFINE_MIN_SEQ(lruvec);
4364
4365         /*
4366          * Try to make the obvious choice first. When anon and file are both
4367          * available from the same generation, interpret swappiness 1 as file
4368          * first and 200 as anon first.
4369          */
4370         if (!swappiness)
4371                 type = LRU_GEN_FILE;
4372         else if (min_seq[LRU_GEN_ANON] < min_seq[LRU_GEN_FILE])
4373                 type = LRU_GEN_ANON;
4374         else if (swappiness == 1)
4375                 type = LRU_GEN_FILE;
4376         else if (swappiness == 200)
4377                 type = LRU_GEN_ANON;
4378         else
4379                 type = get_type_to_scan(lruvec, swappiness, &tier);
4380
4381         for (i = !swappiness; i < ANON_AND_FILE; i++) {
4382                 if (tier < 0)
4383                         tier = get_tier_idx(lruvec, type);
4384
4385                 scanned = scan_pages(lruvec, sc, type, tier, list);
4386                 if (scanned)
4387                         break;
4388
4389                 type = !type;
4390                 tier = -1;
4391         }
4392
4393         *type_scanned = type;
4394
4395         return scanned;
4396 }
4397
4398 static int evict_pages(struct lruvec *lruvec, struct scan_control *sc, int swappiness,
4399                        bool *need_swapping)
4400 {
4401         int type;
4402         int scanned;
4403         int reclaimed;
4404         LIST_HEAD(list);
4405         struct page *page;
4406         enum vm_event_item item;
4407         struct reclaim_stat stat;
4408         struct lru_gen_mm_walk *walk;
4409         struct mem_cgroup *memcg = lruvec_memcg(lruvec);
4410         struct pglist_data *pgdat = lruvec_pgdat(lruvec);
4411
4412         spin_lock_irq(&lruvec_pgdat(lruvec)->lru_lock);
4413
4414         scanned = isolate_pages(lruvec, sc, swappiness, &type, &list);
4415
4416         scanned += try_to_inc_min_seq(lruvec, swappiness);
4417
4418         if (get_nr_gens(lruvec, !swappiness) == MIN_NR_GENS)
4419                 scanned = 0;
4420
4421         spin_unlock_irq(&lruvec_pgdat(lruvec)->lru_lock);
4422
4423         if (list_empty(&list))
4424                 return scanned;
4425
4426         reclaimed = shrink_page_list(&list, pgdat, sc, &stat, false);
4427
4428         list_for_each_entry(page, &list, lru) {
4429                 /* restore LRU_REFS_FLAGS cleared by isolate_page() */
4430                 if (PageWorkingset(page))
4431                         SetPageReferenced(page);
4432
4433                 /* don't add rejected pages to the oldest generation */
4434                 if (PageReclaim(page) &&
4435                     (PageDirty(page) || PageWriteback(page)))
4436                         ClearPageActive(page);
4437                 else
4438                         SetPageActive(page);
4439         }
4440
4441         spin_lock_irq(&lruvec_pgdat(lruvec)->lru_lock);
4442
4443         move_pages_to_lru(lruvec, &list);
4444
4445         walk = current->reclaim_state->mm_walk;
4446         if (walk && walk->batched)
4447                 reset_batch_size(lruvec, walk);
4448
4449         item = current_is_kswapd() ? PGSTEAL_KSWAPD : PGSTEAL_DIRECT;
4450         if (!cgroup_reclaim(sc))
4451                 __count_vm_events(item, reclaimed);
4452         __count_memcg_events(memcg, item, reclaimed);
4453
4454         spin_unlock_irq(&lruvec_pgdat(lruvec)->lru_lock);
4455
4456         mem_cgroup_uncharge_list(&list);
4457         free_unref_page_list(&list);
4458
4459         sc->nr_reclaimed += reclaimed;
4460
4461         if (need_swapping && type == LRU_GEN_ANON)
4462                 *need_swapping = true;
4463
4464         return scanned;
4465 }
4466
4467 /*
4468  * For future optimizations:
4469  * 1. Defer try_to_inc_max_seq() to workqueues to reduce latency for memcg
4470  *    reclaim.
4471  */
4472 static unsigned long get_nr_to_scan(struct lruvec *lruvec, struct scan_control *sc,
4473                                     bool can_swap, bool *need_aging)
4474 {
4475         unsigned long nr_to_scan;
4476         struct mem_cgroup *memcg = lruvec_memcg(lruvec);
4477         DEFINE_MAX_SEQ(lruvec);
4478         DEFINE_MIN_SEQ(lruvec);
4479
4480         if (mem_cgroup_below_min(memcg) ||
4481             (mem_cgroup_below_low(memcg) && !sc->memcg_low_reclaim))
4482                 return 0;
4483
4484         *need_aging = should_run_aging(lruvec, max_seq, min_seq, sc, can_swap, &nr_to_scan);
4485         if (!*need_aging)
4486                 return nr_to_scan;
4487
4488         /* skip the aging path at the default priority */
4489         if (sc->priority == DEF_PRIORITY)
4490                 goto done;
4491
4492         /* leave the work to lru_gen_age_node() */
4493         if (current_is_kswapd())
4494                 return 0;
4495
4496         if (try_to_inc_max_seq(lruvec, max_seq, sc, can_swap, false))
4497                 return nr_to_scan;
4498 done:
4499         return min_seq[!can_swap] + MIN_NR_GENS <= max_seq ? nr_to_scan : 0;
4500 }
4501
4502 static bool should_abort_scan(struct lruvec *lruvec, unsigned long seq,
4503                               struct scan_control *sc, bool need_swapping)
4504 {
4505         int i;
4506         DEFINE_MAX_SEQ(lruvec);
4507
4508         if (!current_is_kswapd()) {
4509                 /* age each memcg at most once to ensure fairness */
4510                 if (max_seq - seq > 1)
4511                         return true;
4512
4513                 /* over-swapping can increase allocation latency */
4514                 if (sc->nr_reclaimed >= sc->nr_to_reclaim && need_swapping)
4515                         return true;
4516
4517                 /* give this thread a chance to exit and free its memory */
4518                 if (fatal_signal_pending(current)) {
4519                         sc->nr_reclaimed += MIN_LRU_BATCH;
4520                         return true;
4521                 }
4522
4523                 if (cgroup_reclaim(sc))
4524                         return false;
4525         } else if (sc->nr_reclaimed - sc->last_reclaimed < sc->nr_to_reclaim)
4526                 return false;
4527
4528         /* keep scanning at low priorities to ensure fairness */
4529         if (sc->priority > DEF_PRIORITY - 2)
4530                 return false;
4531
4532         /*
4533          * A minimum amount of work was done under global memory pressure. For
4534          * kswapd, it may be overshooting. For direct reclaim, the allocation
4535          * may succeed if all suitable zones are somewhat safe. In either case,
4536          * it's better to stop now, and restart later if necessary.
4537          */
4538         for (i = 0; i <= sc->reclaim_idx; i++) {
4539                 unsigned long wmark;
4540                 struct zone *zone = lruvec_pgdat(lruvec)->node_zones + i;
4541
4542                 if (!managed_zone(zone))
4543                         continue;
4544
4545                 wmark = current_is_kswapd() ? high_wmark_pages(zone) : low_wmark_pages(zone);
4546                 if (wmark > zone_page_state(zone, NR_FREE_PAGES))
4547                         return false;
4548         }
4549
4550         sc->nr_reclaimed += MIN_LRU_BATCH;
4551
4552         return true;
4553 }
4554
4555 static void lru_gen_shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc)
4556 {
4557         struct blk_plug plug;
4558         bool need_aging = false;
4559         bool need_swapping = false;
4560         unsigned long scanned = 0;
4561         unsigned long reclaimed = sc->nr_reclaimed;
4562         DEFINE_MAX_SEQ(lruvec);
4563
4564         lru_add_drain();
4565
4566         blk_start_plug(&plug);
4567
4568         set_mm_walk(lruvec_pgdat(lruvec));
4569
4570         while (true) {
4571                 int delta;
4572                 int swappiness;
4573                 unsigned long nr_to_scan;
4574
4575                 if (sc->may_swap)
4576                         swappiness = get_swappiness(lruvec, sc);
4577                 else if (!cgroup_reclaim(sc) && get_swappiness(lruvec, sc))
4578                         swappiness = 1;
4579                 else
4580                         swappiness = 0;
4581
4582                 nr_to_scan = get_nr_to_scan(lruvec, sc, swappiness, &need_aging);
4583                 if (!nr_to_scan)
4584                         goto done;
4585
4586                 delta = evict_pages(lruvec, sc, swappiness, &need_swapping);
4587                 if (!delta)
4588                         goto done;
4589
4590                 scanned += delta;
4591                 if (scanned >= nr_to_scan)
4592                         break;
4593
4594                 if (should_abort_scan(lruvec, max_seq, sc, need_swapping))
4595                         break;
4596
4597                 cond_resched();
4598         }
4599
4600         /* see the comment in lru_gen_age_node() */
4601         if (sc->nr_reclaimed - reclaimed >= MIN_LRU_BATCH && !need_aging)
4602                 sc->memcgs_need_aging = false;
4603 done:
4604         clear_mm_walk();
4605
4606         blk_finish_plug(&plug);
4607 }
4608
4609 /******************************************************************************
4610  *                          state change
4611  ******************************************************************************/
4612
4613 static bool __maybe_unused state_is_valid(struct lruvec *lruvec)
4614 {
4615         struct lru_gen_struct *lrugen = &lruvec->lrugen;
4616
4617         if (lrugen->enabled) {
4618                 enum lru_list lru;
4619
4620                 for_each_evictable_lru(lru) {
4621                         if (!list_empty(&lruvec->lists[lru]))
4622                                 return false;
4623                 }
4624         } else {
4625                 int gen, type, zone;
4626
4627                 for_each_gen_type_zone(gen, type, zone) {
4628                         if (!list_empty(&lrugen->lists[gen][type][zone]))
4629                                 return false;
4630                 }
4631         }
4632
4633         return true;
4634 }
4635
4636 static bool fill_evictable(struct lruvec *lruvec)
4637 {
4638         enum lru_list lru;
4639         int remaining = MAX_LRU_BATCH;
4640
4641         for_each_evictable_lru(lru) {
4642                 int type = is_file_lru(lru);
4643                 bool active = is_active_lru(lru);
4644                 struct list_head *head = &lruvec->lists[lru];
4645
4646                 while (!list_empty(head)) {
4647                         bool success;
4648                         struct page *page = lru_to_page(head);
4649                         enum lru_list lru = page_lru_base_type(page);
4650
4651                         VM_WARN_ON_ONCE_PAGE(PageUnevictable(page), page);
4652                         VM_WARN_ON_ONCE_PAGE(PageActive(page) != active, page);
4653                         VM_WARN_ON_ONCE_PAGE(page_is_file_cache(page) != type, page);
4654                         VM_WARN_ON_ONCE_PAGE(page_lru_gen(page) != -1, page);
4655
4656                         del_page_from_lru_list(page, lruvec, lru);
4657                         success = lru_gen_add_page(lruvec, page, false);
4658                         VM_WARN_ON_ONCE(!success);
4659
4660                         if (!--remaining)
4661                                 return false;
4662                 }
4663         }
4664
4665         return true;
4666 }
4667
4668 static bool drain_evictable(struct lruvec *lruvec)
4669 {
4670         int gen, type, zone;
4671         int remaining = MAX_LRU_BATCH;
4672
4673         for_each_gen_type_zone(gen, type, zone) {
4674                 struct list_head *head = &lruvec->lrugen.lists[gen][type][zone];
4675
4676                 while (!list_empty(head)) {
4677                         bool success;
4678                         struct page *page = lru_to_page(head);
4679                         enum lru_list lru = page_lru_base_type(page);
4680
4681                         VM_WARN_ON_ONCE_PAGE(PageUnevictable(page), page);
4682                         VM_WARN_ON_ONCE_PAGE(PageActive(page), page);
4683                         VM_WARN_ON_ONCE_PAGE(page_is_file_cache(page) != type, page);
4684                         VM_WARN_ON_ONCE_PAGE(page_zonenum(page) != zone, page);
4685
4686                         success = lru_gen_del_page(lruvec, page, false);
4687                         VM_WARN_ON_ONCE(!success);
4688                         add_page_to_lru_list(page, lruvec, lru);
4689
4690                         if (!--remaining)
4691                                 return false;
4692                 }
4693         }
4694
4695         return true;
4696 }
4697
4698 static void lru_gen_change_state(bool enabled)
4699 {
4700         static DEFINE_MUTEX(state_mutex);
4701
4702         struct mem_cgroup *memcg;
4703
4704         cgroup_lock();
4705         cpus_read_lock();
4706         get_online_mems();
4707         mutex_lock(&state_mutex);
4708
4709         if (enabled == lru_gen_enabled())
4710                 goto unlock;
4711
4712         if (enabled)
4713                 static_branch_enable_cpuslocked(&lru_gen_caps[LRU_GEN_CORE]);
4714         else
4715                 static_branch_disable_cpuslocked(&lru_gen_caps[LRU_GEN_CORE]);
4716
4717         memcg = mem_cgroup_iter(NULL, NULL, NULL);
4718         do {
4719                 int nid;
4720
4721                 for_each_node(nid) {
4722                         struct lruvec *lruvec = get_lruvec(memcg, nid);
4723
4724                         if (!lruvec)
4725                                 continue;
4726
4727                         spin_lock_irq(&lruvec_pgdat(lruvec)->lru_lock);
4728
4729                         VM_WARN_ON_ONCE(!seq_is_valid(lruvec));
4730                         VM_WARN_ON_ONCE(!state_is_valid(lruvec));
4731
4732                         lruvec->lrugen.enabled = enabled;
4733
4734                         while (!(enabled ? fill_evictable(lruvec) : drain_evictable(lruvec))) {
4735                                 spin_unlock_irq(&lruvec_pgdat(lruvec)->lru_lock);
4736                                 cond_resched();
4737                                 spin_lock_irq(&lruvec_pgdat(lruvec)->lru_lock);
4738                         }
4739
4740                         spin_unlock_irq(&lruvec_pgdat(lruvec)->lru_lock);
4741                 }
4742
4743                 cond_resched();
4744         } while ((memcg = mem_cgroup_iter(NULL, memcg, NULL)));
4745 unlock:
4746         mutex_unlock(&state_mutex);
4747         put_online_mems();
4748         cpus_read_unlock();
4749         cgroup_unlock();
4750 }
4751
4752 /******************************************************************************
4753  *                          sysfs interface
4754  ******************************************************************************/
4755
4756 static ssize_t show_min_ttl(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
4757 {
4758         return sprintf(buf, "%u\n", jiffies_to_msecs(READ_ONCE(lru_gen_min_ttl)));
4759 }
4760
4761 /* see Documentation/admin-guide/mm/multigen_lru.rst for details */
4762 static ssize_t store_min_ttl(struct kobject *kobj, struct kobj_attribute *attr,
4763                              const char *buf, size_t len)
4764 {
4765         unsigned int msecs;
4766
4767         if (kstrtouint(buf, 0, &msecs))
4768                 return -EINVAL;
4769
4770         WRITE_ONCE(lru_gen_min_ttl, msecs_to_jiffies(msecs));
4771
4772         return len;
4773 }
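
/*
 * Illustrative usage, assuming the "lru_gen" sysfs group registered under
 * mm_kobj in init_lru_gen() below, i.e. /sys/kernel/mm/lru_gen:
 *
 *   # read the current value, reported in milliseconds
 *   cat /sys/kernel/mm/lru_gen/min_ttl_ms
 *   # protect the working set of the last second (see the rst document above)
 *   echo 1000 >/sys/kernel/mm/lru_gen/min_ttl_ms
 */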
4774
4775 static struct kobj_attribute lru_gen_min_ttl_attr = __ATTR(
4776         min_ttl_ms, 0644, show_min_ttl, store_min_ttl
4777 );
4778
4779 static ssize_t show_enabled(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
4780 {
4781         unsigned int caps = 0;
4782
4783         if (get_cap(LRU_GEN_CORE))
4784                 caps |= BIT(LRU_GEN_CORE);
4785
4786         if (arch_has_hw_pte_young() && get_cap(LRU_GEN_MM_WALK))
4787                 caps |= BIT(LRU_GEN_MM_WALK);
4788
4789         if (IS_ENABLED(CONFIG_ARCH_HAS_NONLEAF_PMD_YOUNG) && get_cap(LRU_GEN_NONLEAF_YOUNG))
4790                 caps |= BIT(LRU_GEN_NONLEAF_YOUNG);
4791
4792         return snprintf(buf, PAGE_SIZE, "0x%04x\n", caps);
4793 }
4794
4795 /* see Documentation/admin-guide/mm/multigen_lru.rst for details */
4796 static ssize_t store_enabled(struct kobject *kobj, struct kobj_attribute *attr,
4797                              const char *buf, size_t len)
4798 {
4799         int i;
4800         unsigned int caps;
4801
4802         if (tolower(*buf) == 'n')
4803                 caps = 0;
4804         else if (tolower(*buf) == 'y')
4805                 caps = -1;
4806         else if (kstrtouint(buf, 0, &caps))
4807                 return -EINVAL;
4808
4809         for (i = 0; i < NR_LRU_GEN_CAPS; i++) {
4810                 bool enabled = caps & BIT(i);
4811
4812                 if (i == LRU_GEN_CORE)
4813                         lru_gen_change_state(enabled);
4814                 else if (enabled)
4815                         static_branch_enable(&lru_gen_caps[i]);
4816                 else
4817                         static_branch_disable(&lru_gen_caps[i]);
4818         }
4819
4820         return len;
4821 }
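
/*
 * Illustrative usage, assuming the same /sys/kernel/mm/lru_gen group as
 * registered in init_lru_gen() below: "y" sets all of the capability bits
 * reported by show_enabled(), "n" clears them, and a hex mask selects
 * individual capabilities, e.g. 0x0001 for LRU_GEN_CORE assuming it is the
 * first bit:
 *
 *   echo y      >/sys/kernel/mm/lru_gen/enabled
 *   echo 0x0001 >/sys/kernel/mm/lru_gen/enabled
 */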
4822
4823 static struct kobj_attribute lru_gen_enabled_attr = __ATTR(
4824         enabled, 0644, show_enabled, store_enabled
4825 );
4826
4827 static struct attribute *lru_gen_attrs[] = {
4828         &lru_gen_min_ttl_attr.attr,
4829         &lru_gen_enabled_attr.attr,
4830         NULL
4831 };
4832
4833 static struct attribute_group lru_gen_attr_group = {
4834         .name = "lru_gen",
4835         .attrs = lru_gen_attrs,
4836 };
4837
4838 /******************************************************************************
4839  *                          debugfs interface
4840  ******************************************************************************/
4841
4842 static void *lru_gen_seq_start(struct seq_file *m, loff_t *pos)
4843 {
4844         struct mem_cgroup *memcg;
4845         loff_t nr_to_skip = *pos;
4846
4847         m->private = kvmalloc(PATH_MAX, GFP_KERNEL);
4848         if (!m->private)
4849                 return ERR_PTR(-ENOMEM);
4850
4851         memcg = mem_cgroup_iter(NULL, NULL, NULL);
4852         do {
4853                 int nid;
4854
4855                 for_each_node_state(nid, N_MEMORY) {
4856                         if (!nr_to_skip--)
4857                                 return get_lruvec(memcg, nid);
4858                 }
4859         } while ((memcg = mem_cgroup_iter(NULL, memcg, NULL)));
4860
4861         return NULL;
4862 }
4863
4864 static void lru_gen_seq_stop(struct seq_file *m, void *v)
4865 {
4866         if (!IS_ERR_OR_NULL(v))
4867                 mem_cgroup_iter_break(NULL, lruvec_memcg(v));
4868
4869         kvfree(m->private);
4870         m->private = NULL;
4871 }
4872
4873 static void *lru_gen_seq_next(struct seq_file *m, void *v, loff_t *pos)
4874 {
4875         int nid = lruvec_pgdat(v)->node_id;
4876         struct mem_cgroup *memcg = lruvec_memcg(v);
4877
4878         ++*pos;
4879
4880         nid = next_memory_node(nid);
4881         if (nid == MAX_NUMNODES) {
4882                 memcg = mem_cgroup_iter(NULL, memcg, NULL);
4883                 if (!memcg)
4884                         return NULL;
4885
4886                 nid = first_memory_node;
4887         }
4888
4889         return get_lruvec(memcg, nid);
4890 }
4891
4892 static void lru_gen_seq_show_full(struct seq_file *m, struct lruvec *lruvec,
4893                                   unsigned long max_seq, unsigned long *min_seq,
4894                                   unsigned long seq)
4895 {
4896         int i;
4897         int type, tier;
4898         int hist = lru_hist_from_seq(seq);
4899         struct lru_gen_struct *lrugen = &lruvec->lrugen;
4900
4901         for (tier = 0; tier < MAX_NR_TIERS; tier++) {
4902                 seq_printf(m, "            %10d", tier);
4903                 for (type = 0; type < ANON_AND_FILE; type++) {
4904                         const char *s = "   ";
4905                         unsigned long n[3] = {};
4906
4907                         if (seq == max_seq) {
4908                                 s = "RT ";
4909                                 n[0] = READ_ONCE(lrugen->avg_refaulted[type][tier]);
4910                                 n[1] = READ_ONCE(lrugen->avg_total[type][tier]);
4911                         } else if (seq == min_seq[type] || NR_HIST_GENS > 1) {
4912                                 s = "rep";
4913                                 n[0] = atomic_long_read(&lrugen->refaulted[hist][type][tier]);
4914                                 n[1] = atomic_long_read(&lrugen->evicted[hist][type][tier]);
4915                                 if (tier)
4916                                         n[2] = READ_ONCE(lrugen->protected[hist][type][tier - 1]);
4917                         }
4918
4919                         for (i = 0; i < 3; i++)
4920                                 seq_printf(m, " %10lu%c", n[i], s[i]);
4921                 }
4922                 seq_putc(m, '\n');
4923         }
4924
4925         seq_puts(m, "                      ");
4926         for (i = 0; i < NR_MM_STATS; i++) {
4927                 const char *s = "      ";
4928                 unsigned long n = 0;
4929
4930                 if (seq == max_seq && NR_HIST_GENS == 1) {
4931                         s = "LOYNFA";
4932                         n = READ_ONCE(lruvec->mm_state.stats[hist][i]);
4933                 } else if (seq != max_seq && NR_HIST_GENS > 1) {
4934                         s = "loynfa";
4935                         n = READ_ONCE(lruvec->mm_state.stats[hist][i]);
4936                 }
4937
4938                 seq_printf(m, " %10lu%c", n, s[i]);
4939         }
4940         seq_putc(m, '\n');
4941 }
4942
4943 /* see Documentation/admin-guide/mm/multigen_lru.rst for details */
4944 static int lru_gen_seq_show(struct seq_file *m, void *v)
4945 {
4946         unsigned long seq;
4947         bool full = !debugfs_real_fops(m->file)->write;
4948         struct lruvec *lruvec = v;
4949         struct lru_gen_struct *lrugen = &lruvec->lrugen;
4950         int nid = lruvec_pgdat(lruvec)->node_id;
4951         struct mem_cgroup *memcg = lruvec_memcg(lruvec);
4952         DEFINE_MAX_SEQ(lruvec);
4953         DEFINE_MIN_SEQ(lruvec);
4954
4955         if (nid == first_memory_node) {
4956                 const char *path = memcg ? m->private : "";
4957
4958 #ifdef CONFIG_MEMCG
4959                 if (memcg)
4960                         cgroup_path(memcg->css.cgroup, m->private, PATH_MAX);
4961 #endif
4962                 seq_printf(m, "memcg %5hu %s\n", mem_cgroup_id(memcg), path);
4963         }
4964
4965         seq_printf(m, " node %5d\n", nid);
4966
4967         if (!full)
4968                 seq = min_seq[LRU_GEN_ANON];
4969         else if (max_seq >= MAX_NR_GENS)
4970                 seq = max_seq - MAX_NR_GENS + 1;
4971         else
4972                 seq = 0;
4973
4974         for (; seq <= max_seq; seq++) {
4975                 int type, zone;
4976                 int gen = lru_gen_from_seq(seq);
4977                 unsigned long birth = READ_ONCE(lruvec->lrugen.timestamps[gen]);
4978
4979                 seq_printf(m, " %10lu %10u", seq, jiffies_to_msecs(jiffies - birth));
4980
4981                 for (type = 0; type < ANON_AND_FILE; type++) {
4982                         unsigned long size = 0;
4983                         char mark = full && seq < min_seq[type] ? 'x' : ' ';
4984
4985                         for (zone = 0; zone < MAX_NR_ZONES; zone++)
4986                                 size += max(READ_ONCE(lrugen->nr_pages[gen][type][zone]), 0L);
4987
4988                         seq_printf(m, " %10lu%c", size, mark);
4989                 }
4990
4991                 seq_putc(m, '\n');
4992
4993                 if (full)
4994                         lru_gen_seq_show_full(m, lruvec, max_seq, min_seq, seq);
4995         }
4996
4997         return 0;
4998 }
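
/*
 * Illustrative output of the abridged ("lru_gen") view produced above, with
 * made-up numbers; each generation line shows its sequence number, its age in
 * milliseconds, and its anon and file page counts:
 *
 *   memcg    25 /system.slice/example.service
 *    node     0
 *            20      18304      24576      52224
 *            21       9216       1536      63488
 */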
4999
5000 static const struct seq_operations lru_gen_seq_ops = {
5001         .start = lru_gen_seq_start,
5002         .stop = lru_gen_seq_stop,
5003         .next = lru_gen_seq_next,
5004         .show = lru_gen_seq_show,
5005 };
5006
5007 static int run_aging(struct lruvec *lruvec, unsigned long seq, struct scan_control *sc,
5008                      bool can_swap, bool force_scan)
5009 {
5010         DEFINE_MAX_SEQ(lruvec);
5011         DEFINE_MIN_SEQ(lruvec);
5012
5013         if (seq < max_seq)
5014                 return 0;
5015
5016         if (seq > max_seq)
5017                 return -EINVAL;
5018
5019         if (!force_scan && min_seq[!can_swap] + MAX_NR_GENS - 1 <= max_seq)
5020                 return -ERANGE;
5021
5022         try_to_inc_max_seq(lruvec, max_seq, sc, can_swap, force_scan);
5023
5024         return 0;
5025 }
5026
5027 static int run_eviction(struct lruvec *lruvec, unsigned long seq, struct scan_control *sc,
5028                         int swappiness, unsigned long nr_to_reclaim)
5029 {
5030         DEFINE_MAX_SEQ(lruvec);
5031
5032         if (seq + MIN_NR_GENS > max_seq)
5033                 return -EINVAL;
5034
5035         sc->nr_reclaimed = 0;
5036
5037         while (!signal_pending(current)) {
5038                 DEFINE_MIN_SEQ(lruvec);
5039
5040                 if (seq < min_seq[!swappiness])
5041                         return 0;
5042
5043                 if (sc->nr_reclaimed >= nr_to_reclaim)
5044                         return 0;
5045
5046                 if (!evict_pages(lruvec, sc, swappiness, NULL))
5047                         return 0;
5048
5049                 cond_resched();
5050         }
5051
5052         return -EINTR;
5053 }
5054
5055 static int run_cmd(char cmd, int memcg_id, int nid, unsigned long seq,
5056                    struct scan_control *sc, int swappiness, unsigned long opt)
5057 {
5058         struct lruvec *lruvec;
5059         int err = -EINVAL;
5060         struct mem_cgroup *memcg = NULL;
5061
5062         if (nid < 0 || nid >= MAX_NUMNODES || !node_state(nid, N_MEMORY))
5063                 return -EINVAL;
5064
5065         if (!mem_cgroup_disabled()) {
5066                 rcu_read_lock();
5067                 memcg = mem_cgroup_from_id(memcg_id);
5068 #ifdef CONFIG_MEMCG
5069                 if (memcg && !css_tryget(&memcg->css))
5070                         memcg = NULL;
5071 #endif
5072                 rcu_read_unlock();
5073
5074                 if (!memcg)
5075                         return -EINVAL;
5076         }
5077
5078         if (memcg_id != mem_cgroup_id(memcg))
5079                 goto done;
5080
5081         lruvec = get_lruvec(memcg, nid);
5082
5083         if (swappiness < 0)
5084                 swappiness = get_swappiness(lruvec, sc);
5085         else if (swappiness > 200)
5086                 goto done;
5087
5088         switch (cmd) {
5089         case '+':
5090                 err = run_aging(lruvec, seq, sc, swappiness, opt);
5091                 break;
5092         case '-':
5093                 err = run_eviction(lruvec, seq, sc, swappiness, opt);
5094                 break;
5095         }
5096 done:
5097         mem_cgroup_put(memcg);
5098
5099         return err;
5100 }
5101
5102 /* see Documentation/admin-guide/mm/multigen_lru.rst for details */
5103 static ssize_t lru_gen_seq_write(struct file *file, const char __user *src,
5104                                  size_t len, loff_t *pos)
5105 {
5106         void *buf;
5107         char *cur, *next;
5108         unsigned int flags;
5109         struct blk_plug plug;
5110         int err = -EINVAL;
5111         struct scan_control sc = {
5112                 .may_writepage = true,
5113                 .may_unmap = true,
5114                 .may_swap = true,
5115                 .reclaim_idx = MAX_NR_ZONES - 1,
5116                 .gfp_mask = GFP_KERNEL,
5117         };
5118
5119         buf = kvmalloc(len + 1, GFP_KERNEL);
5120         if (!buf)
5121                 return -ENOMEM;
5122
5123         if (copy_from_user(buf, src, len)) {
5124                 kvfree(buf);
5125                 return -EFAULT;
5126         }
5127
5128         set_task_reclaim_state(current, &sc.reclaim_state);
5129         flags = memalloc_noreclaim_save();
5130         blk_start_plug(&plug);
5131         if (!set_mm_walk(NULL)) {
5132                 err = -ENOMEM;
5133                 goto done;
5134         }
5135
5136         next = buf;
5137         next[len] = '\0';
5138
5139         while ((cur = strsep(&next, ",;\n"))) {
5140                 int n;
5141                 int end;
5142                 char cmd;
5143                 unsigned int memcg_id;
5144                 unsigned int nid;
5145                 unsigned long seq;
5146                 unsigned int swappiness = -1;
5147                 unsigned long opt = -1;
5148
5149                 cur = skip_spaces(cur);
5150                 if (!*cur)
5151                         continue;
5152
5153                 n = sscanf(cur, "%c %u %u %lu %n %u %n %lu %n", &cmd, &memcg_id, &nid,
5154                            &seq, &end, &swappiness, &end, &opt, &end);
5155                 if (n < 4 || cur[end]) {
5156                         err = -EINVAL;
5157                         break;
5158                 }
5159
5160                 err = run_cmd(cmd, memcg_id, nid, seq, &sc, swappiness, opt);
5161                 if (err)
5162                         break;
5163         }
5164 done:
5165         clear_mm_walk();
5166         blk_finish_plug(&plug);
5167         memalloc_noreclaim_restore(flags);
5168         set_task_reclaim_state(current, NULL);
5169
5170         kvfree(buf);
5171
5172         return err ? : len;
5173 }
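
/*
 * Illustrative commands accepted by the parser above (all numbers made up);
 * the two optional trailing fields map to the swappiness and opt arguments
 * of run_cmd():
 *
 *   # age memcg 25 on node 0, targeting max_gen_nr 30
 *   echo "+ 25 0 30" >/sys/kernel/debug/lru_gen
 *   # evict from the same lruvec up to min_gen_nr 26, swappiness 40,
 *   # at most 1024 pages
 *   echo "- 25 0 26 40 1024" >/sys/kernel/debug/lru_gen
 */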
5174
5175 static int lru_gen_seq_open(struct inode *inode, struct file *file)
5176 {
5177         return seq_open(file, &lru_gen_seq_ops);
5178 }
5179
5180 static const struct file_operations lru_gen_rw_fops = {
5181         .open = lru_gen_seq_open,
5182         .read = seq_read,
5183         .write = lru_gen_seq_write,
5184         .llseek = seq_lseek,
5185         .release = seq_release,
5186 };
5187
5188 static const struct file_operations lru_gen_ro_fops = {
5189         .open = lru_gen_seq_open,
5190         .read = seq_read,
5191         .llseek = seq_lseek,
5192         .release = seq_release,
5193 };
5194
5195 /******************************************************************************
5196  *                          initialization
5197  ******************************************************************************/
5198
5199 void lru_gen_init_lruvec(struct lruvec *lruvec)
5200 {
5201         int i;
5202         int gen, type, zone;
5203         struct lru_gen_struct *lrugen = &lruvec->lrugen;
5204
5205         lrugen->max_seq = MIN_NR_GENS + 1;
5206         lrugen->enabled = lru_gen_enabled();
5207
5208         for (i = 0; i <= MIN_NR_GENS + 1; i++)
5209                 lrugen->timestamps[i] = jiffies;
5210
5211         for_each_gen_type_zone(gen, type, zone)
5212                 INIT_LIST_HEAD(&lrugen->lists[gen][type][zone]);
5213
5214         lruvec->mm_state.seq = MIN_NR_GENS;
5215 }
5216
5217 #ifdef CONFIG_MEMCG
5218 void lru_gen_init_memcg(struct mem_cgroup *memcg)
5219 {
5220         INIT_LIST_HEAD(&memcg->mm_list.fifo);
5221         spin_lock_init(&memcg->mm_list.lock);
5222 }
5223
5224 void lru_gen_exit_memcg(struct mem_cgroup *memcg)
5225 {
5226         int i;
5227         int nid;
5228
5229         for_each_node(nid) {
5230                 struct lruvec *lruvec = get_lruvec(memcg, nid);
5231
5232                 VM_WARN_ON_ONCE(memchr_inv(lruvec->lrugen.nr_pages, 0,
5233                                            sizeof(lruvec->lrugen.nr_pages)));
5234
5235                 for (i = 0; i < NR_BLOOM_FILTERS; i++) {
5236                         bitmap_free(lruvec->mm_state.filters[i]);
5237                         lruvec->mm_state.filters[i] = NULL;
5238                 }
5239         }
5240 }
5241 #endif
5242
5243 static int __init init_lru_gen(void)
5244 {
5245         BUILD_BUG_ON(MIN_NR_GENS + 1 >= MAX_NR_GENS);
5246         BUILD_BUG_ON(BIT(LRU_GEN_WIDTH) <= MAX_NR_GENS);
5247
5248         if (sysfs_create_group(mm_kobj, &lru_gen_attr_group))
5249                 pr_err("lru_gen: failed to create sysfs group\n");
5250
5251         debugfs_create_file("lru_gen", 0644, NULL, NULL, &lru_gen_rw_fops);
5252         debugfs_create_file("lru_gen_full", 0444, NULL, NULL, &lru_gen_ro_fops);
5253
5254         return 0;
5255 }
5256 late_initcall(init_lru_gen);
5257
5258 #else /* !CONFIG_LRU_GEN */
5259
5260 static void lru_gen_age_node(struct pglist_data *pgdat, struct scan_control *sc)
5261 {
5262 }
5263
5264 static void lru_gen_shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc)
5265 {
5266 }
5267
5268 #endif /* CONFIG_LRU_GEN */
5269
5270 static void shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc)
5271 {
5272         unsigned long nr[NR_LRU_LISTS];
5273         unsigned long targets[NR_LRU_LISTS];
5274         unsigned long nr_to_scan;
5275         enum lru_list lru;
5276         unsigned long nr_reclaimed = 0;
5277         unsigned long nr_to_reclaim = sc->nr_to_reclaim;
5278         struct blk_plug plug;
5279         bool scan_adjusted;
5280
5281         if (lru_gen_enabled()) {
5282                 lru_gen_shrink_lruvec(lruvec, sc);
5283                 return;
5284         }
5285
5286         get_scan_count(lruvec, sc, nr);
5287
5288         /* Record the original scan target for proportional adjustments later */
5289         memcpy(targets, nr, sizeof(nr));
5290
5291         /*
5292          * Global reclaiming within direct reclaim at DEF_PRIORITY is a normal
5293          * event that can occur when there is little memory pressure, e.g.
5294          * multiple streaming readers/writers. Hence, we do not abort scanning
5295          * when the requested number of pages has been reclaimed while scanning
5296          * at DEF_PRIORITY, on the assumption that direct reclaim running at all
5297          * implies that kswapd is not keeping up and it is best to do a batch of
5298          * work at once. For memcg reclaim, one check is made to abort
5299          * proportional reclaim if either the file or the anon LRU has already
5300          * dropped to zero at the first pass.
5301          */
5302         scan_adjusted = (!cgroup_reclaim(sc) && !current_is_kswapd() &&
5303                          sc->priority == DEF_PRIORITY);
5304
5305         blk_start_plug(&plug);
5306         while (nr[LRU_INACTIVE_ANON] || nr[LRU_ACTIVE_FILE] ||
5307                                         nr[LRU_INACTIVE_FILE]) {
5308                 unsigned long nr_anon, nr_file, percentage;
5309                 unsigned long nr_scanned;
5310
5311                 for_each_evictable_lru(lru) {
5312                         if (nr[lru]) {
5313                                 nr_to_scan = min(nr[lru], SWAP_CLUSTER_MAX);
5314                                 nr[lru] -= nr_to_scan;
5315
5316                                 nr_reclaimed += shrink_list(lru, nr_to_scan,
5317                                                             lruvec, sc);
5318                         }
5319                 }
5320
5321                 cond_resched();
5322
5323                 if (nr_reclaimed < nr_to_reclaim || scan_adjusted)
5324                         continue;
5325
5326                 /*
5327                  * For kswapd and memcg, reclaim at least the number of pages
5328                  * requested. Ensure that the anon and file LRUs are scanned
5329                  * proportionally to what was requested by get_scan_count(). We
5330                  * stop reclaiming one LRU and reduce the amount of scanning
5331                  * proportional to the original scan target.
5332                  */
5333                 nr_file = nr[LRU_INACTIVE_FILE] + nr[LRU_ACTIVE_FILE];
5334                 nr_anon = nr[LRU_INACTIVE_ANON] + nr[LRU_ACTIVE_ANON];
5335
5336                 /*
5337                  * It's just vindictive to attack the larger once the smaller
5338                  * has gone to zero.  And given the way we stop scanning the
5339                  * smaller below, this makes sure that we only make one nudge
5340                  * towards proportionality once we've got nr_to_reclaim.
5341                  */
5342                 if (!nr_file || !nr_anon)
5343                         break;
5344
5345                 if (nr_file > nr_anon) {
5346                         unsigned long scan_target = targets[LRU_INACTIVE_ANON] +
5347                                                 targets[LRU_ACTIVE_ANON] + 1;
5348                         lru = LRU_BASE;
5349                         percentage = nr_anon * 100 / scan_target;
5350                 } else {
5351                         unsigned long scan_target = targets[LRU_INACTIVE_FILE] +
5352                                                 targets[LRU_ACTIVE_FILE] + 1;
5353                         lru = LRU_FILE;
5354                         percentage = nr_file * 100 / scan_target;
5355                 }
5356
5357                 /* Stop scanning the smaller of the LRU */
5358                 nr[lru] = 0;
5359                 nr[lru + LRU_ACTIVE] = 0;
5360
5361                 /*
5362                  * Recalculate the other LRU scan count based on its original
5363                  * scan target and the percentage scanning already complete
5364                  */
5365                 lru = (lru == LRU_FILE) ? LRU_BASE : LRU_FILE;
5366                 nr_scanned = targets[lru] - nr[lru];
5367                 nr[lru] = targets[lru] * (100 - percentage) / 100;
5368                 nr[lru] -= min(nr[lru], nr_scanned);
5369
5370                 lru += LRU_ACTIVE;
5371                 nr_scanned = targets[lru] - nr[lru];
5372                 nr[lru] = targets[lru] * (100 - percentage) / 100;
5373                 nr[lru] -= min(nr[lru], nr_scanned);
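
                /*
                 * Worked example (illustrative): if the anon LRUs had an
                 * original target of 1000 pages and 400 of them remain,
                 * percentage is about 40, so each remaining file LRU is
                 * trimmed to roughly 60% of its original target minus what
                 * it has already scanned, e.g. 4000 * 60 / 100 - 600 = 1800
                 * for a list with a target of 4000 and 600 pages scanned.
                 * Both types thus end up scanned to a similar fraction of
                 * their original targets.
                 */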
5374
5375                 scan_adjusted = true;
5376         }
5377         blk_finish_plug(&plug);
5378         sc->nr_reclaimed += nr_reclaimed;
5379
5380         /*
5381          * Even if we did not try to evict anon pages at all, we want to
5382          * rebalance the anon lru active/inactive ratio.
5383          */
5384         if (total_swap_pages && inactive_is_low(lruvec, LRU_INACTIVE_ANON))
5385                 shrink_active_list(SWAP_CLUSTER_MAX, lruvec,
5386                                    sc, LRU_ACTIVE_ANON);
5387 }
5388
5389 /* Use reclaim/compaction for costly allocs or under memory pressure */
5390 static bool in_reclaim_compaction(struct scan_control *sc)
5391 {
5392         if (IS_ENABLED(CONFIG_COMPACTION) && sc->order &&
5393                         (sc->order > PAGE_ALLOC_COSTLY_ORDER ||
5394                          sc->priority < DEF_PRIORITY - 2))
5395                 return true;
5396
5397         return false;
5398 }
5399
5400 /*
5401  * Reclaim/compaction is used for high-order allocation requests. It reclaims
5402  * order-0 pages before compacting the zone. should_continue_reclaim() returns
5403  * true if more pages should be reclaimed such that when the page allocator
5404  * calls try_to_compact_pages() it will have enough free pages to succeed.
5405  * It will give up earlier than that if there is difficulty reclaiming pages.
5406  */
5407 static inline bool should_continue_reclaim(struct pglist_data *pgdat,
5408                                         unsigned long nr_reclaimed,
5409                                         struct scan_control *sc)
5410 {
5411         unsigned long pages_for_compaction;
5412         unsigned long inactive_lru_pages;
5413         int z;
5414
5415         /* If not in reclaim/compaction mode, stop */
5416         if (!in_reclaim_compaction(sc))
5417                 return false;
5418
5419         /*
5420          * Stop if we failed to reclaim any pages from the last SWAP_CLUSTER_MAX
5421          * number of pages that were scanned. This returns to the caller with
5422          * the risk that reclaim/compaction and the resulting allocation
5423          * attempt will fail. In the past we tried harder for __GFP_RETRY_MAYFAIL
5424          * allocations by requiring that the full LRU list had been scanned
5425          * first, assuming that a zero delta of sc->nr_scanned meant a full LRU
5426          * scan, but that approximation was wrong: there were corner cases
5427          * where a non-zero number of pages was always scanned.
5428          */
5429         if (!nr_reclaimed)
5430                 return false;
5431
5432         /* If compaction would go ahead or the allocation would succeed, stop */
5433         for (z = 0; z <= sc->reclaim_idx; z++) {
5434                 struct zone *zone = &pgdat->node_zones[z];
5435                 if (!managed_zone(zone))
5436                         continue;
5437
5438                 switch (compaction_suitable(zone, sc->order, 0, sc->reclaim_idx)) {
5439                 case COMPACT_SUCCESS:
5440                 case COMPACT_CONTINUE:
5441                         return false;
5442                 default:
5443                         /* check next zone */
5444                         ;
5445                 }
5446         }
5447
5448         /*
5449          * If we have not reclaimed enough pages for compaction and the
5450          * inactive lists are large enough, continue reclaiming
5451          */
5452         pages_for_compaction = compact_gap(sc->order);
5453         inactive_lru_pages = node_page_state(pgdat, NR_INACTIVE_FILE);
5454         if (get_nr_swap_pages() > 0)
5455                 inactive_lru_pages += node_page_state(pgdat, NR_INACTIVE_ANON);
5456
5457         return inactive_lru_pages > pages_for_compaction;
5458 }
5459
5460 static void shrink_node_memcgs(pg_data_t *pgdat, struct scan_control *sc)
5461 {
5462         struct mem_cgroup *target_memcg = sc->target_mem_cgroup;
5463         struct mem_cgroup *memcg;
5464
5465         memcg = mem_cgroup_iter(target_memcg, NULL, NULL);
5466         do {
5467                 struct lruvec *lruvec = mem_cgroup_lruvec(memcg, pgdat);
5468                 unsigned long reclaimed;
5469                 unsigned long scanned;
5470
5471                 mem_cgroup_calculate_protection(target_memcg, memcg);
5472
5473                 if (mem_cgroup_below_min(memcg)) {
5474                         /*
5475                          * Hard protection.
5476                          * If there is no reclaimable memory, OOM.
5477                          */
5478                         continue;
5479                 } else if (mem_cgroup_below_low(memcg)) {
5480                         /*
5481                          * Soft protection.
5482                          * Respect the protection only as long as
5483                          * there is an unprotected supply
5484                          * of reclaimable memory from other cgroups.
5485                          */
5486                         if (!sc->memcg_low_reclaim) {
5487                                 sc->memcg_low_skipped = 1;
5488                                 continue;
5489                         }
5490                         memcg_memory_event(memcg, MEMCG_LOW);
5491                 }
5492
5493                 reclaimed = sc->nr_reclaimed;
5494                 scanned = sc->nr_scanned;
5495
5496                 shrink_lruvec(lruvec, sc);
5497
5498                 shrink_slab(sc->gfp_mask, pgdat->node_id, memcg,
5499                             sc->priority);
5500
5501                 /* Record the group's reclaim efficiency */
5502                 vmpressure(sc->gfp_mask, memcg, false,
5503                            sc->nr_scanned - scanned,
5504                            sc->nr_reclaimed - reclaimed);
5505
5506         } while ((memcg = mem_cgroup_iter(target_memcg, memcg, NULL)));
5507 }
5508
5509 static bool shrink_node(pg_data_t *pgdat, struct scan_control *sc)
5510 {
5511         struct reclaim_state *reclaim_state = current->reclaim_state;
5512         unsigned long nr_reclaimed, nr_scanned;
5513         struct lruvec *target_lruvec;
5514         bool reclaimable = false;
5515
5516         target_lruvec = mem_cgroup_lruvec(sc->target_mem_cgroup, pgdat);
5517
5518 again:
5519         memset(&sc->nr, 0, sizeof(sc->nr));
5520
5521         nr_reclaimed = sc->nr_reclaimed;
5522         nr_scanned = sc->nr_scanned;
5523
5524         prepare_scan_count(pgdat, sc);
5525
5526         shrink_node_memcgs(pgdat, sc);
5527
5528         if (reclaim_state) {
5529                 sc->nr_reclaimed += reclaim_state->reclaimed_slab;
5530                 reclaim_state->reclaimed_slab = 0;
5531         }
5532
5533         /* Record the subtree's reclaim efficiency */
5534         vmpressure(sc->gfp_mask, sc->target_mem_cgroup, true,
5535                    sc->nr_scanned - nr_scanned,
5536                    sc->nr_reclaimed - nr_reclaimed);
5537
5538         if (sc->nr_reclaimed - nr_reclaimed)
5539                 reclaimable = true;
5540
5541         if (current_is_kswapd()) {
5542                 /*
5543                  * If reclaim is isolating dirty pages under writeback,
5544                  * it implies that the long-lived page allocation rate
5545                  * is exceeding the page laundering rate. Either the
5546                  * global limits are not being effective at throttling
5547                  * processes due to the page distribution throughout
5548                  * zones or there is heavy usage of a slow backing
5549                  * device. The only option is to throttle from reclaim
5550                  * context which is not ideal as there is no guarantee
5551                  * the dirtying process is throttled in the same way
5552                  * balance_dirty_pages() manages.
5553                  *
5554                  * Once a node is flagged PGDAT_WRITEBACK, kswapd will
5555                  * count the number of pages under writeback flagged for
5556                  * immediate reclaim and stall if any are encountered
5557                  * in the nr_immediate check below.
5558                  */
5559                 if (sc->nr.writeback && sc->nr.writeback == sc->nr.taken)
5560                         set_bit(PGDAT_WRITEBACK, &pgdat->flags);
5561
5562                 /* Allow kswapd to start writing pages during reclaim.*/
5563                 /* Allow kswapd to start writing pages during reclaim. */
5564                         set_bit(PGDAT_DIRTY, &pgdat->flags);
5565
5566                 /*
5567                  * If kswapd scans pages marked for immediate
5568                  * reclaim and under writeback (nr_immediate), it
5569                  * implies that pages are cycling through the LRU
5570                  * faster than they are written so also forcibly stall.
5571                  */
5572                 if (sc->nr.immediate)
5573                         congestion_wait(BLK_RW_ASYNC, HZ/10);
5574         }
5575
5576         /*
5577          * Tag a node/memcg as congested if all the dirty pages
5578          * scanned were backed by a congested BDI and
5579          * wait_iff_congested will stall.
5580          *
5581          * Legacy memcg will stall in page writeback so avoid forcibly
5582          * stalling in wait_iff_congested().
5583          */
5584         if ((current_is_kswapd() ||
5585              (cgroup_reclaim(sc) && writeback_throttling_sane(sc))) &&
5586             sc->nr.dirty && sc->nr.dirty == sc->nr.congested)
5587                 set_bit(LRUVEC_CONGESTED, &target_lruvec->flags);
5588
5589         /*
5590          * Stall direct reclaim for IO completions if the underlying BDIs
5591          * and the node are congested. Allow kswapd to continue until it
5592          * starts encountering unqueued dirty pages or cycling through
5593          * the LRU too quickly.
5594          */
5595         if (!current_is_kswapd() && current_may_throttle() &&
5596             !sc->hibernation_mode &&
5597             test_bit(LRUVEC_CONGESTED, &target_lruvec->flags))
5598                 wait_iff_congested(BLK_RW_ASYNC, HZ/10);
5599
5600         if (should_continue_reclaim(pgdat, sc->nr_reclaimed - nr_reclaimed,
5601                                     sc))
5602                 goto again;
5603
5604         /*
5605          * Kswapd gives up on balancing particular nodes after too
5606          * many failures to reclaim anything from them and goes to
5607          * sleep. On reclaim progress, reset the failure counter. A
5608          * successful direct reclaim run will revive a dormant kswapd.
5609          */
5610         if (reclaimable)
5611                 pgdat->kswapd_failures = 0;
5612
5613         return reclaimable;
5614 }
5615
5616 /*
5617  * Returns true if compaction should go ahead for a costly-order request, or
5618  * the allocation would already succeed without compaction. Returns false if we
5619  * should reclaim first.
5620  */
5621 static inline bool compaction_ready(struct zone *zone, struct scan_control *sc)
5622 {
5623         unsigned long watermark;
5624         enum compact_result suitable;
5625
5626         suitable = compaction_suitable(zone, sc->order, 0, sc->reclaim_idx);
5627         if (suitable == COMPACT_SUCCESS)
5628                 /* Allocation should succeed already. Don't reclaim. */
5629                 return true;
5630         if (suitable == COMPACT_SKIPPED)
5631                 /* Compaction cannot yet proceed. Do reclaim. */
5632                 return false;
5633
5634         /*
5635          * Compaction is already possible, but it takes time to run and there
5636          * are potentially other callers using the pages just freed. So proceed
5637          * with reclaim to make a buffer of free pages available to give
5638          * compaction a reasonable chance of completing and allocating the page.
5639          * Note that we won't actually reclaim the whole buffer in one attempt
5640          * as the target watermark in should_continue_reclaim() is lower. But if
5641          * we are already above the high+gap watermark, don't reclaim at all.
5642          */
5643         watermark = high_wmark_pages(zone) + compact_gap(sc->order);
5644
5645         return zone_watermark_ok_safe(zone, 0, watermark, sc->reclaim_idx);
5646 }
5647
5648 /*
5649  * This is the direct reclaim path, for page-allocating processes.  We only
5650  * try to reclaim pages from zones which will satisfy the caller's allocation
5651  * request.
5652  *
5653  * If a zone is deemed to be full of pinned pages then just give it a light
5654  * scan and then give up on it.
5655  */
5656 static void shrink_zones(struct zonelist *zonelist, struct scan_control *sc)
5657 {
5658         struct zoneref *z;
5659         struct zone *zone;
5660         unsigned long nr_soft_reclaimed;
5661         unsigned long nr_soft_scanned;
5662         gfp_t orig_mask;
5663         pg_data_t *last_pgdat = NULL;
5664
5665         /*
5666          * If the number of buffer_heads in the machine exceeds the maximum
5667          * allowed level, force direct reclaim to scan the highmem zone as
5668          * highmem pages could be pinning lowmem pages storing buffer_heads
5669          */
5670         orig_mask = sc->gfp_mask;
5671         if (buffer_heads_over_limit) {
5672                 sc->gfp_mask |= __GFP_HIGHMEM;
5673                 sc->reclaim_idx = gfp_zone(sc->gfp_mask);
5674         }
5675
5676         for_each_zone_zonelist_nodemask(zone, z, zonelist,
5677                                         sc->reclaim_idx, sc->nodemask) {
5678                 /*
5679                  * Take care that memory controller reclaiming has only a small
5680                  * influence on the global LRU.
5681                  */
5682                 if (!cgroup_reclaim(sc)) {
5683                         if (!cpuset_zone_allowed(zone,
5684                                                  GFP_KERNEL | __GFP_HARDWALL))
5685                                 continue;
5686
5687                         /*
5688                          * If we already have plenty of memory free for
5689                          * compaction in this zone, don't free any more.
5690                          * Even though compaction is invoked for any
5691                          * non-zero order, only frequent costly order
5692                          * reclamation is disruptive enough to become a
5693                          * noticeable problem, like transparent huge
5694                          * page allocations.
5695                          */
5696                         if (IS_ENABLED(CONFIG_COMPACTION) &&
5697                             sc->order > PAGE_ALLOC_COSTLY_ORDER &&
5698                             compaction_ready(zone, sc)) {
5699                                 sc->compaction_ready = true;
5700                                 continue;
5701                         }
5702
5703                         /*
5704                          * Shrink each node in the zonelist once. If the
5705                          * zonelist is ordered by zone (not the default) then a
5706                          * node may be shrunk multiple times but in that case
5707                          * the user prefers lower zones being preserved.
5708                          */
5709                         if (zone->zone_pgdat == last_pgdat)
5710                                 continue;
5711
5712                         /*
5713                          * This steals pages from memory cgroups over softlimit
5714                          * and returns the number of reclaimed pages and
5715                          * scanned pages. This works for global memory pressure
5716                          * and balancing, not for a memcg's limit.
5717                          */
5718                         nr_soft_scanned = 0;
5719                         nr_soft_reclaimed = mem_cgroup_soft_limit_reclaim(zone->zone_pgdat,
5720                                                 sc->order, sc->gfp_mask,
5721                                                 &nr_soft_scanned);
5722                         sc->nr_reclaimed += nr_soft_reclaimed;
5723                         sc->nr_scanned += nr_soft_scanned;
5724                         /* need some check to avoid more shrink_node() calls */
5725                 }
5726
5727                 /* See comment about same check for global reclaim above */
5728                 if (zone->zone_pgdat == last_pgdat)
5729                         continue;
5730                 last_pgdat = zone->zone_pgdat;
5731                 shrink_node(zone->zone_pgdat, sc);
5732         }
5733
5734         /*
5735          * Restore to original mask to avoid the impact on the caller if we
5736          * promoted it to __GFP_HIGHMEM.
5737          */
5738         sc->gfp_mask = orig_mask;
5739 }
5740
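/*
 * Record the current WORKINGSET_ACTIVATE count for the target lruvec so the
 * next reclaim cycle can tell how much refault activity has happened since
 * this one. No-op when the multi-gen LRU is enabled.
 */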
5741 static void snapshot_refaults(struct mem_cgroup *target_memcg, pg_data_t *pgdat)
5742 {
5743         struct lruvec *target_lruvec;
5744         unsigned long refaults;
5745
5746         if (lru_gen_enabled())
5747                 return;
5748
5749         target_lruvec = mem_cgroup_lruvec(target_memcg, pgdat);
5750         refaults = lruvec_page_state(target_lruvec, WORKINGSET_ACTIVATE);
5751         target_lruvec->refaults = refaults;
5752 }
5753
5754 /*
5755  * This is the main entry point to direct page reclaim.
5756  *
5757  * If a full scan of the inactive list fails to free enough memory then we
5758  * are "out of memory" and something needs to be killed.
5759  *
5760  * If the caller is !__GFP_FS then the probability of a failure is reasonably
5761  * high - the zone may be full of dirty or under-writeback pages, which this
5762  * caller can't do much about.  We kick the writeback threads and take explicit
5763  * naps in the hope that some of these pages can be written.  But if the
5764  * allocating task holds filesystem locks which prevent writeout this might not
5765  * work, and the allocation attempt will fail.
5766  *
5767  * returns:     0, if no pages reclaimed
5768  *              else, the number of pages reclaimed
5769  */
5770 static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
5771                                           struct scan_control *sc)
5772 {
5773         int initial_priority = sc->priority;
5774         pg_data_t *last_pgdat;
5775         struct zoneref *z;
5776         struct zone *zone;
5777 retry:
5778         delayacct_freepages_start();
5779
5780         if (!cgroup_reclaim(sc))
5781                 __count_zid_vm_events(ALLOCSTALL, sc->reclaim_idx, 1);
5782
5783         do {
5784                 vmpressure_prio(sc->gfp_mask, sc->target_mem_cgroup,
5785                                 sc->priority);
5786                 sc->nr_scanned = 0;
5787                 shrink_zones(zonelist, sc);
5788
5789                 if (sc->nr_reclaimed >= sc->nr_to_reclaim)
5790                         break;
5791
5792                 if (sc->compaction_ready)
5793                         break;
5794
5795                 /*
5796                  * If we're having trouble reclaiming, start doing
5797                  * writepage even in laptop mode.
5798                  */
5799                 if (sc->priority < DEF_PRIORITY - 2)
5800                         sc->may_writepage = 1;
5801         } while (--sc->priority >= 0);
5802
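        /*
         * Reclaim is finished (or was aborted): snapshot refault counters on
         * every node that was visited and, for cgroup reclaim, clear the
         * lruvec congestion flag so throttling does not leak into the next
         * reclaim cycle.
         */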
5803         last_pgdat = NULL;
5804         for_each_zone_zonelist_nodemask(zone, z, zonelist, sc->reclaim_idx,
5805                                         sc->nodemask) {
5806                 if (zone->zone_pgdat == last_pgdat)
5807                         continue;
5808                 last_pgdat = zone->zone_pgdat;
5809
5810                 snapshot_refaults(sc->target_mem_cgroup, zone->zone_pgdat);
5811
5812                 if (cgroup_reclaim(sc)) {
5813                         struct lruvec *lruvec;
5814
5815                         lruvec = mem_cgroup_lruvec(sc->target_mem_cgroup,
5816                                                    zone->zone_pgdat);
5817                         clear_bit(LRUVEC_CONGESTED, &lruvec->flags);
5818                 }
5819         }
5820
5821         delayacct_freepages_end();
5822
5823         if (sc->nr_reclaimed)
5824                 return sc->nr_reclaimed;
5825
5826         /* Aborted reclaim to try compaction? don't OOM, then */
5827         if (sc->compaction_ready)
5828                 return 1;
5829
5830         /*
5831          * We make inactive:active ratio decisions based on the node's
5832          * composition of memory, but a restrictive reclaim_idx or a
5833          * memory.low cgroup setting can exempt large amounts of
5834          * memory from reclaim. Neither of which are very common, so
5835          * instead of doing costly eligibility calculations of the
5836          * entire cgroup subtree up front, we assume the estimates are
5837          * good, and retry with forcible deactivation if that fails.
5838          */
5839         if (sc->skipped_deactivate) {
5840                 sc->priority = initial_priority;
5841                 sc->force_deactivate = 1;
5842                 sc->skipped_deactivate = 0;
5843                 goto retry;
5844         }
5845
5846         /* Untapped cgroup reserves?  Don't OOM, retry. */
5847         if (sc->memcg_low_skipped) {
5848                 sc->priority = initial_priority;
5849                 sc->force_deactivate = 0;
5850                 sc->skipped_deactivate = 0;
5851                 sc->memcg_low_reclaim = 1;
5852                 sc->memcg_low_skipped = 0;
5853                 goto retry;
5854         }
5855
5856         return 0;
5857 }
5858
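/*
 * Check whether enough free pages sit above the pfmemalloc reserves of the
 * zones up to ZONE_NORMAL for direct reclaimers to proceed without being
 * throttled. Nodes that kswapd has given up on are never throttled. If the
 * check fails, kswapd is woken so progress can be made while callers sleep
 * on pfmemalloc_wait.
 */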
5859 static bool allow_direct_reclaim(pg_data_t *pgdat)
5860 {
5861         struct zone *zone;
5862         unsigned long pfmemalloc_reserve = 0;
5863         unsigned long free_pages = 0;
5864         int i;
5865         bool wmark_ok;
5866
5867         if (pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES)
5868                 return true;
5869
5870         for (i = 0; i <= ZONE_NORMAL; i++) {
5871                 zone = &pgdat->node_zones[i];
5872                 if (!managed_zone(zone))
5873                         continue;
5874
5875                 if (!zone_reclaimable_pages(zone))
5876                         continue;
5877
5878                 pfmemalloc_reserve += min_wmark_pages(zone);
5879                 free_pages += zone_page_state(zone, NR_FREE_PAGES);
5880         }
5881
5882         /* If there are no reserves (unexpected config) then do not throttle */
5883         if (!pfmemalloc_reserve)
5884                 return true;
5885
5886         wmark_ok = free_pages > pfmemalloc_reserve / 2;
5887
5888         /* kswapd must be awake if processes are being throttled */
5889         if (!wmark_ok && waitqueue_active(&pgdat->kswapd_wait)) {
5890                 pgdat->kswapd_classzone_idx = min(pgdat->kswapd_classzone_idx,
5891                                                 (enum zone_type)ZONE_NORMAL);
5892                 wake_up_interruptible(&pgdat->kswapd_wait);
5893         }
5894
5895         return wmark_ok;
5896 }
5897
5898 /*
5899  * Throttle direct reclaimers if backing storage is backed by the network
5900  * and the PFMEMALLOC reserve for the preferred node is getting dangerously
5901  * depleted. kswapd will continue to make progress and wake the processes
5902  * when the low watermark is reached.
5903  *
5904  * Returns true if a fatal signal was delivered during throttling. If this
5905  * happens, the page allocator should not consider triggering the OOM killer.
5906  */
5907 static bool throttle_direct_reclaim(gfp_t gfp_mask, struct zonelist *zonelist,
5908                                         nodemask_t *nodemask)
5909 {
5910         struct zoneref *z;
5911         struct zone *zone;
5912         pg_data_t *pgdat = NULL;
5913
5914         /*
5915          * Kernel threads should not be throttled as they may be indirectly
5916          * responsible for cleaning pages necessary for reclaim to make forward
5917          * progress. kjournald for example may enter direct reclaim while
5918          * committing a transaction where throttling it could force other
5919          * processes to block on log_wait_commit().
5920          */
5921         if (current->flags & PF_KTHREAD)
5922                 goto out;
5923
5924         /*
5925          * If a fatal signal is pending, this process should not throttle.
5926          * It should return quickly so it can exit and free its memory
5927          */
5928         if (fatal_signal_pending(current))
5929                 goto out;
5930
5931         /*
5932          * Check if the pfmemalloc reserves are ok by finding the first node
5933          * with a usable ZONE_NORMAL or lower zone. The expectation is that
5934          * GFP_KERNEL will be required for allocating network buffers when
5935          * swapping over the network so ZONE_HIGHMEM is unusable.
5936          *
5937          * Throttling is based on the first usable node and throttled processes
5938          * wait on a queue until kswapd makes progress and wakes them. There
5939          * is an affinity then between processes waking up and where reclaim
5940          * progress has been made assuming the process wakes on the same node.
5941          * More importantly, processes running on remote nodes will not compete
5942          * for remote pfmemalloc reserves and processes on different nodes
5943          * should make reasonable progress.
5944          */
5945         for_each_zone_zonelist_nodemask(zone, z, zonelist,
5946                                         gfp_zone(gfp_mask), nodemask) {
5947                 if (zone_idx(zone) > ZONE_NORMAL)
5948                         continue;
5949
5950                 /* Throttle based on the first usable node */
5951                 pgdat = zone->zone_pgdat;
5952                 if (allow_direct_reclaim(pgdat))
5953                         goto out;
5954                 break;
5955         }
5956
5957         /* If no zone was usable by the allocation flags then do not throttle */
5958         if (!pgdat)
5959                 goto out;
5960
5961         /* Account for the throttling */
5962         count_vm_event(PGSCAN_DIRECT_THROTTLE);
5963
5964         /*
5965          * If the caller cannot enter the filesystem, it's possible that it
5966          * is due to the caller holding an FS lock or performing a journal
5967          * transaction in the case of a filesystem like ext[3|4]. In this case,
5968          * it is not safe to block on pfmemalloc_wait as kswapd could be
5969          * blocked waiting on the same lock. Instead, throttle for up to a
5970          * second before continuing.
5971          */
5972         if (!(gfp_mask & __GFP_FS)) {
5973                 wait_event_interruptible_timeout(pgdat->pfmemalloc_wait,
5974                         allow_direct_reclaim(pgdat), HZ);
5975
5976                 goto check_pending;
5977         }
5978
5979         /* Throttle until kswapd wakes the process */
5980         wait_event_killable(zone->zone_pgdat->pfmemalloc_wait,
5981                 allow_direct_reclaim(pgdat));
5982
5983 check_pending:
5984         if (fatal_signal_pending(current))
5985                 return true;
5986
5987 out:
5988         return false;
5989 }
5990
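/*
 * Main entry point for direct reclaim from the page allocator. The caller may
 * be throttled while PFMEMALLOC reserves are low; if a fatal signal arrives
 * during that throttling, 1 is returned so the allocator does not go on to
 * invoke the OOM killer.
 */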
5991 unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
5992                                 gfp_t gfp_mask, nodemask_t *nodemask)
5993 {
5994         unsigned long nr_reclaimed;
5995         struct scan_control sc = {
5996                 .nr_to_reclaim = SWAP_CLUSTER_MAX,
5997                 .gfp_mask = current_gfp_context(gfp_mask),
5998                 .reclaim_idx = gfp_zone(gfp_mask),
5999                 .order = order,
6000                 .nodemask = nodemask,
6001                 .priority = DEF_PRIORITY,
6002                 .may_writepage = !laptop_mode,
6003                 .may_unmap = 1,
6004                 .may_swap = 1,
6005         };
6006
6007         /*
6008          * scan_control uses s8 fields for order, priority, and reclaim_idx.
6009          * Confirm they are large enough for max values.
6010          */
6011         BUILD_BUG_ON(MAX_ORDER > S8_MAX);
6012         BUILD_BUG_ON(DEF_PRIORITY > S8_MAX);
6013         BUILD_BUG_ON(MAX_NR_ZONES > S8_MAX);
6014
6015         /*
6016          * Do not enter reclaim if fatal signal was delivered while throttled.
6017          * 1 is returned so that the page allocator does not OOM kill at this
6018          * point.
6019          */
6020         if (throttle_direct_reclaim(sc.gfp_mask, zonelist, nodemask))
6021                 return 1;
6022
6023         set_task_reclaim_state(current, &sc.reclaim_state);
6024         trace_mm_vmscan_direct_reclaim_begin(order, sc.gfp_mask);
6025
6026         nr_reclaimed = do_try_to_free_pages(zonelist, &sc);
6027
6028         trace_mm_vmscan_direct_reclaim_end(nr_reclaimed);
6029         set_task_reclaim_state(current, NULL);
6030
6031         return nr_reclaimed;
6032 }
6033
6034 #ifdef CONFIG_MEMCG
6035
6036 /* Only used by soft limit reclaim. Do not reuse for anything else. */
6037 unsigned long mem_cgroup_shrink_node(struct mem_cgroup *memcg,
6038                                                 gfp_t gfp_mask, bool noswap,
6039                                                 pg_data_t *pgdat,
6040                                                 unsigned long *nr_scanned)
6041 {
6042         struct lruvec *lruvec = mem_cgroup_lruvec(memcg, pgdat);
6043         struct scan_control sc = {
6044                 .nr_to_reclaim = SWAP_CLUSTER_MAX,
6045                 .target_mem_cgroup = memcg,
6046                 .may_writepage = !laptop_mode,
6047                 .may_unmap = 1,
6048                 .reclaim_idx = MAX_NR_ZONES - 1,
6049                 .may_swap = !noswap,
6050         };
6051
6052         WARN_ON_ONCE(!current->reclaim_state);
6053
6054         sc.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) |
6055                         (GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK);
6056
6057         trace_mm_vmscan_memcg_softlimit_reclaim_begin(sc.order,
6058                                                       sc.gfp_mask);
6059
6060         /*
6061          * NOTE: Although we can get the priority field, using it
6062          * here is not a good idea, since it limits the pages we can scan.
6063          * If we don't reclaim here, the shrink_node from balance_pgdat
6064          * will pick up pages from other mem cgroups as well. We hack
6065          * the priority and make it zero.
6066          */
6067         shrink_lruvec(lruvec, &sc);
6068
6069         trace_mm_vmscan_memcg_softlimit_reclaim_end(sc.nr_reclaimed);
6070
6071         *nr_scanned = sc.nr_scanned;
6072
6073         return sc.nr_reclaimed;
6074 }
6075
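/*
 * Reclaim up to nr_pages (at least SWAP_CLUSTER_MAX) of memory charged to
 * @memcg. The zonelist of a victim node is picked by
 * mem_cgroup_select_victim_node(); reclaim itself is not restricted to that
 * node.
 */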
6076 unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg,
6077                                            unsigned long nr_pages,
6078                                            gfp_t gfp_mask,
6079                                            bool may_swap)
6080 {
6081         struct zonelist *zonelist;
6082         unsigned long nr_reclaimed;
6083         unsigned long pflags;
6084         int nid;
6085         unsigned int noreclaim_flag;
6086         struct scan_control sc = {
6087                 .nr_to_reclaim = max(nr_pages, SWAP_CLUSTER_MAX),
6088                 .gfp_mask = (current_gfp_context(gfp_mask) & GFP_RECLAIM_MASK) |
6089                                 (GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK),
6090                 .reclaim_idx = MAX_NR_ZONES - 1,
6091                 .target_mem_cgroup = memcg,
6092                 .priority = DEF_PRIORITY,
6093                 .may_writepage = !laptop_mode,
6094                 .may_unmap = 1,
6095                 .may_swap = may_swap,
6096         };
6097
6098         set_task_reclaim_state(current, &sc.reclaim_state);
6099         /*
6100          * Unlike direct reclaim via alloc_pages(), memcg's reclaim doesn't
6101          * care about which node we get pages from. So the node where we start the
6102          * scan does not need to be the current node.
6103          */
6104         nid = mem_cgroup_select_victim_node(memcg);
6105
6106         zonelist = &NODE_DATA(nid)->node_zonelists[ZONELIST_FALLBACK];
6107
6108         trace_mm_vmscan_memcg_reclaim_begin(0, sc.gfp_mask);
6109
6110         psi_memstall_enter(&pflags);
6111         noreclaim_flag = memalloc_noreclaim_save();
6112
6113         nr_reclaimed = do_try_to_free_pages(zonelist, &sc);
6114
6115         memalloc_noreclaim_restore(noreclaim_flag);
6116         psi_memstall_leave(&pflags);
6117
6118         trace_mm_vmscan_memcg_reclaim_end(nr_reclaimed);
6119         set_task_reclaim_state(current, NULL);
6120
6121         return nr_reclaimed;
6122 }
6123 #endif
6124
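/*
 * Age the anonymous LRUs of every memcg on this node so pages get a chance to
 * be referenced before kswapd reclaims them. Defers to lru_gen_age_node()
 * when the multi-gen LRU is enabled, and does nothing without swap as anon
 * pages cannot be reclaimed in that case.
 */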
6125 static void kswapd_age_node(struct pglist_data *pgdat, struct scan_control *sc)
6126 {
6127         struct mem_cgroup *memcg;
6128         struct lruvec *lruvec;
6129
6130         if (lru_gen_enabled()) {
6131                 lru_gen_age_node(pgdat, sc);
6132                 return;
6133         }
6134
6135         /* FIXME? */
6136         if (!total_swap_pages)
6137                 return;
6138
6139         lruvec = mem_cgroup_lruvec(NULL, pgdat);
6140         if (!inactive_is_low(lruvec, LRU_INACTIVE_ANON))
6141                 return;
6142
6143         memcg = mem_cgroup_iter(NULL, NULL, NULL);
6144         do {
6145                 lruvec = mem_cgroup_lruvec(memcg, pgdat);
6146                 shrink_active_list(SWAP_CLUSTER_MAX, lruvec,
6147                                    sc, LRU_ACTIVE_ANON);
6148                 memcg = mem_cgroup_iter(NULL, memcg, NULL);
6149         } while (memcg);
6150 }
6151
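/*
 * Returns true if any eligible zone in the node still has a non-zero
 * watermark boost, i.e. kswapd was most likely woken for fragmentation
 * avoidance rather than because free memory ran low.
 */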
6152 static bool pgdat_watermark_boosted(pg_data_t *pgdat, int classzone_idx)
6153 {
6154         int i;
6155         struct zone *zone;
6156
6157         /*
6158          * Check for watermark boosts top-down as the higher zones
6159          * are more likely to be boosted. Both watermarks and boosts
6160          * should not be checked at the same time as reclaim would
6161          * start prematurely when there is no boosting and a lower
6162          * zone is balanced.
6163          */
6164         for (i = classzone_idx; i >= 0; i--) {
6165                 zone = pgdat->node_zones + i;
6166                 if (!managed_zone(zone))
6167                         continue;
6168
6169                 if (zone->watermark_boost)
6170                         return true;
6171         }
6172
6173         return false;
6174 }
6175
6176 /*
6177  * Returns true if there is an eligible zone balanced for the request order
6178  * and classzone_idx
6179  */
6180 static bool pgdat_balanced(pg_data_t *pgdat, int order, int classzone_idx)
6181 {
6182         int i;
6183         unsigned long mark = -1;
6184         struct zone *zone;
6185
6186         /*
6187          * Check watermarks bottom-up as lower zones are more likely to
6188          * meet watermarks.
6189          */
6190         for (i = 0; i <= classzone_idx; i++) {
6191                 zone = pgdat->node_zones + i;
6192
6193                 if (!managed_zone(zone))
6194                         continue;
6195
6196                 mark = high_wmark_pages(zone);
6197                 if (zone_watermark_ok_safe(zone, order, mark, classzone_idx))
6198                         return true;
6199         }
6200
6201         /*
6202          * If a node has no populated zone within classzone_idx, it does not
6203          * need balancing by definition. This can happen if a zone-restricted
6204          * allocation tries to wake a remote kswapd.
6205          */
6206         if (mark == -1)
6207                 return true;
6208
6209         return false;
6210 }
6211
6212 /* Clear pgdat state for congested, dirty or under writeback. */
6213 static void clear_pgdat_congested(pg_data_t *pgdat)
6214 {
6215         struct lruvec *lruvec = mem_cgroup_lruvec(NULL, pgdat);
6216
6217         clear_bit(LRUVEC_CONGESTED, &lruvec->flags);
6218         clear_bit(PGDAT_DIRTY, &pgdat->flags);
6219         clear_bit(PGDAT_WRITEBACK, &pgdat->flags);
6220 }
6221
6222 /*
6223  * Prepare kswapd for sleeping. This verifies that there are no processes
6224  * waiting in throttle_direct_reclaim() and that watermarks have been met.
6225  *
6226  * Returns true if kswapd is ready to sleep
6227  */
6228 static bool prepare_kswapd_sleep(pg_data_t *pgdat, int order, int classzone_idx)
6229 {
6230         /*
6231          * The throttled processes are normally woken up in balance_pgdat() as
6232          * soon as allow_direct_reclaim() is true. But there is a potential
6233          * race between when kswapd checks the watermarks and a process gets
6234          * throttled. There is also a potential race if processes get
6235          * throttled, kswapd wakes, a large process exits thereby balancing the
6236          * zones, which causes kswapd to exit balance_pgdat() before reaching
6237          * the wake up checks. If kswapd is going to sleep, no process should
6238          * be sleeping on pfmemalloc_wait, so wake them now if necessary. If
6239          * the wake up is premature, processes will wake kswapd and get
6240          * throttled again. The difference from wake ups in balance_pgdat() is
6241          * that here we are under prepare_to_wait().
6242          */
6243         if (waitqueue_active(&pgdat->pfmemalloc_wait))
6244                 wake_up_all(&pgdat->pfmemalloc_wait);
6245
6246         /* Hopeless node, leave it to direct reclaim */
6247         if (pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES)
6248                 return true;
6249
6250         if (pgdat_balanced(pgdat, order, classzone_idx)) {
6251                 clear_pgdat_congested(pgdat);
6252                 return true;
6253         }
6254
6255         return false;
6256 }
6257
6258 /*
6259  * kswapd shrinks a node of pages that are at or below the highest usable
6260  * zone that is currently unbalanced.
6261  *
6262  * Returns true if kswapd scanned at least the requested number of pages to
6263  * reclaim or if the lack of progress was due to pages under writeback.
6264  * This is used to determine if the scanning priority needs to be raised.
6265  */
6266 static bool kswapd_shrink_node(pg_data_t *pgdat,
6267                                struct scan_control *sc)
6268 {
6269         struct zone *zone;
6270         int z;
6271
6272         /* Reclaim a number of pages proportional to the number of zones */
6273         sc->nr_to_reclaim = 0;
6274         for (z = 0; z <= sc->reclaim_idx; z++) {
6275                 zone = pgdat->node_zones + z;
6276                 if (!managed_zone(zone))
6277                         continue;
6278
6279                 sc->nr_to_reclaim += max(high_wmark_pages(zone), SWAP_CLUSTER_MAX);
6280         }
6281
6282         /*
6283          * Historically care was taken to put equal pressure on all zones but
6284          * now pressure is applied based on node LRU order.
6285          */
6286         shrink_node(pgdat, sc);
6287
6288         /*
6289          * Fragmentation may mean that the system cannot be rebalanced for
6290          * high-order allocations. If twice the allocation size has been
6291          * reclaimed then recheck watermarks only at order-0 to prevent
6292          * excessive reclaim. Assume that a process that requested a
6293          * high-order allocation can direct reclaim/compact.
6294          */
6295         if (sc->order && sc->nr_reclaimed >= compact_gap(sc->order))
6296                 sc->order = 0;
6297
6298         return sc->nr_scanned >= sc->nr_to_reclaim;
6299 }
6300
6301 /*
6302  * For kswapd, balance_pgdat() will reclaim pages across a node from zones
6303  * that are eligible for use by the caller until at least one zone is
6304  * balanced.
6305  *
6306  * Returns the order kswapd finished reclaiming at.
6307  *
6308  * kswapd scans the zones in the highmem->normal->dma direction.  It skips
6309  * zones which have free_pages > high_wmark_pages(zone), but once a zone is
6310  * found to have free_pages <= high_wmark_pages(zone), any page in that zone
6311  * or lower is eligible for reclaim until at least one usable zone is
6312  * balanced.
6313  */
6314 static int balance_pgdat(pg_data_t *pgdat, int order, int classzone_idx)
6315 {
6316         int i;
6317         unsigned long nr_soft_reclaimed;
6318         unsigned long nr_soft_scanned;
6319         unsigned long pflags;
6320         unsigned long nr_boost_reclaim;
6321         unsigned long zone_boosts[MAX_NR_ZONES] = { 0, };
6322         bool boosted;
6323         struct zone *zone;
6324         struct scan_control sc = {
6325                 .gfp_mask = GFP_KERNEL,
6326                 .order = order,
6327                 .may_unmap = 1,
6328         };
6329
6330         set_task_reclaim_state(current, &sc.reclaim_state);
6331         psi_memstall_enter(&pflags);
6332         __fs_reclaim_acquire();
6333
6334         count_vm_event(PAGEOUTRUN);
6335
6336         /*
6337          * Account for the reclaim boost. Note that the zone boost is left in
6338          * place so that parallel allocations that are near the watermark will
6339          * stall or direct reclaim until kswapd is finished.
6340          */
6341         nr_boost_reclaim = 0;
6342         for (i = 0; i <= classzone_idx; i++) {
6343                 zone = pgdat->node_zones + i;
6344                 if (!managed_zone(zone))
6345                         continue;
6346
6347                 nr_boost_reclaim += zone->watermark_boost;
6348                 zone_boosts[i] = zone->watermark_boost;
6349         }
6350         boosted = nr_boost_reclaim;
6351
6352 restart:
6353         sc.priority = DEF_PRIORITY;
6354         do {
6355                 unsigned long nr_reclaimed = sc.nr_reclaimed;
6356                 bool raise_priority = true;
6357                 bool balanced;
6358                 bool ret;
6359
6360                 sc.reclaim_idx = classzone_idx;
6361
6362                 /*
6363                  * If the number of buffer_heads exceeds the maximum allowed
6364                  * then consider reclaiming from all zones. This has a dual
6365                  * purpose -- on 64-bit systems it is expected that
6366                  * buffer_heads are stripped during active rotation. On 32-bit
6367                  * systems, highmem pages can pin lowmem memory and shrinking
6368                  * buffers can relieve lowmem pressure. Reclaim may still not
6369                  * go ahead if all eligible zones for the original allocation
6370                  * request are balanced to avoid excessive reclaim from kswapd.
6371                  */
6372                 if (buffer_heads_over_limit) {
6373                         for (i = MAX_NR_ZONES - 1; i >= 0; i--) {
6374                                 zone = pgdat->node_zones + i;
6375                                 if (!managed_zone(zone))
6376                                         continue;
6377
6378                                 sc.reclaim_idx = i;
6379                                 break;
6380                         }
6381                 }
6382
6383                 /*
6384                  * If the pgdat is imbalanced then ignore boosting and preserve
6385                  * the watermarks for a later time and restart. Note that the
6386                  * zone watermarks will be still reset at the end of balancing
6387                  * on the grounds that the normal reclaim should be enough to
6388                  * re-evaluate if boosting is required when kswapd next wakes.
6389                  */
6390                 balanced = pgdat_balanced(pgdat, sc.order, classzone_idx);
6391                 if (!balanced && nr_boost_reclaim) {
6392                         nr_boost_reclaim = 0;
6393                         goto restart;
6394                 }
6395
6396                 /*
6397                  * If boosting is not active then only reclaim if there are no
6398                  * eligible zones. Note that sc.reclaim_idx is not used as
6399                  * buffer_heads_over_limit may have adjusted it.
6400                  */
6401                 if (!nr_boost_reclaim && balanced)
6402                         goto out;
6403
6404                 /* Limit the priority of boosting to avoid reclaim writeback */
6405                 if (nr_boost_reclaim && sc.priority == DEF_PRIORITY - 2)
6406                         raise_priority = false;
6407
6408                 /*
6409                  * Do not writeback or swap pages for boosted reclaim. The
6410                  * intent is to relieve pressure not issue sub-optimal IO
6411                  * from reclaim context. If no pages are reclaimed, the
6412                  * reclaim will be aborted.
6413                  */
6414                 sc.may_writepage = !laptop_mode && !nr_boost_reclaim;
6415                 sc.may_swap = !nr_boost_reclaim;
6416
6417                 /*
6418                  * Do some background aging, to give pages a chance to be
6419                  * referenced before reclaiming. All pages are rotated
6420                  * regardless of classzone as this is about consistent aging.
6421                  */
6422                 kswapd_age_node(pgdat, &sc);
6423
6424                 /*
6425                  * If we're having trouble reclaiming, start doing writepage
6426                  * even in laptop mode.
6427                  */
6428                 if (sc.priority < DEF_PRIORITY - 2)
6429                         sc.may_writepage = 1;
6430
6431                 /* Call soft limit reclaim before calling shrink_node. */
6432                 sc.nr_scanned = 0;
6433                 nr_soft_scanned = 0;
6434                 nr_soft_reclaimed = mem_cgroup_soft_limit_reclaim(pgdat, sc.order,
6435                                                 sc.gfp_mask, &nr_soft_scanned);
6436                 sc.nr_reclaimed += nr_soft_reclaimed;
6437
6438                 /*
6439                  * There should be no need to raise the scanning priority if
6440                  * enough pages are already being scanned that the high
6441                  * watermark would be met at 100% efficiency.
6442                  */
6443                 if (kswapd_shrink_node(pgdat, &sc))
6444                         raise_priority = false;
6445
6446                 /*
6447                  * If the low watermark is met there is no need for processes
6448                  * to be throttled on pfmemalloc_wait as they should now be
6449                  * able to safely make forward progress. Wake them up.
6450                  */
6451                 if (waitqueue_active(&pgdat->pfmemalloc_wait) &&
6452                                 allow_direct_reclaim(pgdat))
6453                         wake_up_all(&pgdat->pfmemalloc_wait);
6454
6455                 /* Check if kswapd should be suspending */
6456                 __fs_reclaim_release();
6457                 ret = try_to_freeze();
6458                 __fs_reclaim_acquire();
6459                 if (ret || kthread_should_stop())
6460                         break;
6461
6462                 /*
6463                  * Raise priority if scanning rate is too low or there was no
6464                  * progress in reclaiming pages
6465                  */
6466                 nr_reclaimed = sc.nr_reclaimed - nr_reclaimed;
6467                 nr_boost_reclaim -= min(nr_boost_reclaim, nr_reclaimed);
6468
6469                 /*
6470                  * If reclaim made no progress for a boost, stop reclaim as
6471                  * IO cannot be queued and it could be an infinite loop in
6472                  * extreme circumstances.
6473                  */
6474                 if (nr_boost_reclaim && !nr_reclaimed)
6475                         break;
6476
6477                 if (raise_priority || !nr_reclaimed)
6478                         sc.priority--;
6479         } while (sc.priority >= 1);
6480
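        /* Track full failures so hopeless nodes can be left to direct reclaim. */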
6481         if (!sc.nr_reclaimed)
6482                 pgdat->kswapd_failures++;
6483
6484 out:
6485         /* If reclaim was boosted, account for the reclaim done in this pass */
6486         if (boosted) {
6487                 unsigned long flags;
6488
6489                 for (i = 0; i <= classzone_idx; i++) {
6490                         if (!zone_boosts[i])
6491                                 continue;
6492
6493                         /* Increments are under the zone lock */
6494                         zone = pgdat->node_zones + i;
6495                         spin_lock_irqsave(&zone->lock, flags);
6496                         zone->watermark_boost -= min(zone->watermark_boost, zone_boosts[i]);
6497                         spin_unlock_irqrestore(&zone->lock, flags);
6498                 }
6499
6500                 /*
6501                  * As there is now likely space, wake up kcompactd to defragment
6502                  * pageblocks.
6503                  */
6504                 wakeup_kcompactd(pgdat, pageblock_order, classzone_idx);
6505         }
6506
6507         snapshot_refaults(NULL, pgdat);
6508         __fs_reclaim_release();
6509         psi_memstall_leave(&pflags);
6510         set_task_reclaim_state(current, NULL);
6511
6512         /*
6513          * Return the order kswapd stopped reclaiming at as
6514          * prepare_kswapd_sleep() takes it into account. If another caller
6515          * entered the allocator slow path while kswapd was awake, order will
6516          * remain at the higher level.
6517          */
6518         return sc.order;
6519 }
6520
6521 /*
6522  * The pgdat->kswapd_classzone_idx is used to pass the highest zone index to be
6523  * reclaimed by kswapd from the waker. If the value is MAX_NR_ZONES which is not
6524  * a valid index then either kswapd runs for the first time or kswapd couldn't sleep
6525  * after previous reclaim attempt (node is still unbalanced). In that case
6526  * return the zone index of the previous kswapd reclaim cycle.
6527  */
6528 static enum zone_type kswapd_classzone_idx(pg_data_t *pgdat,
6529                                            enum zone_type prev_classzone_idx)
6530 {
6531         if (pgdat->kswapd_classzone_idx == MAX_NR_ZONES)
6532                 return prev_classzone_idx;
6533         return pgdat->kswapd_classzone_idx;
6534 }
6535
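/*
 * kswapd sleeps in two stages: a short nap of HZ/10 during which kcompactd is
 * woken to compact for the original allocation order, and, if the node is
 * still balanced after the nap, a full sleep until the next wakeup_kswapd()
 * call. A premature wakeup records the pending order and classzone index for
 * the next balancing pass.
 */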
6536 static void kswapd_try_to_sleep(pg_data_t *pgdat, int alloc_order, int reclaim_order,
6537                                 unsigned int classzone_idx)
6538 {
6539         long remaining = 0;
6540         DEFINE_WAIT(wait);
6541
6542         if (freezing(current) || kthread_should_stop())
6543                 return;
6544
6545         prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE);
6546
6547         /*
6548          * Try to sleep for a short interval. Note that kcompactd will only be
6549          * woken if it is possible to sleep for a short interval. This is
6550          * deliberate on the assumption that if reclaim cannot keep an
6551          * eligible zone balanced, it's also unlikely that compaction will
6552          * succeed.
6553          */
6554         if (prepare_kswapd_sleep(pgdat, reclaim_order, classzone_idx)) {
6555                 /*
6556                  * Compaction records what page blocks it recently failed to
6557                  * isolate pages from and skips them in the future scanning.
6558                  * When kswapd is going to sleep, it is reasonable to assume
6559                  * that page isolation and compaction may now succeed, so reset the cache.
6560                  */
6561                 reset_isolation_suitable(pgdat);
6562
6563                 /*
6564                  * We have freed the memory, now we should compact it to make
6565                  * allocation of the requested order possible.
6566                  */
6567                 wakeup_kcompactd(pgdat, alloc_order, classzone_idx);
6568
6569                 remaining = schedule_timeout(HZ/10);
6570
6571                 /*
6572                  * If woken prematurely then reset kswapd_classzone_idx and
6573                  * order. The values will either be from a wakeup request or
6574                  * the previous request that slept prematurely.
6575                  */
6576                 if (remaining) {
6577                         pgdat->kswapd_classzone_idx = kswapd_classzone_idx(pgdat, classzone_idx);
6578                         pgdat->kswapd_order = max(pgdat->kswapd_order, reclaim_order);
6579                 }
6580
6581                 finish_wait(&pgdat->kswapd_wait, &wait);
6582                 prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE);
6583         }
6584
6585         /*
6586          * After a short sleep, check if it was a premature sleep. If not, then
6587          * go fully to sleep until explicitly woken up.
6588          */
6589         if (!remaining &&
6590             prepare_kswapd_sleep(pgdat, reclaim_order, classzone_idx)) {
6591                 trace_mm_vmscan_kswapd_sleep(pgdat->node_id);
6592
6593                 /*
6594                  * vmstat counters are not perfectly accurate and the estimated
6595                  * value for counters such as NR_FREE_PAGES can deviate from the
6596                  * true value by nr_online_cpus * threshold. To avoid the zone
6597                  * watermarks being breached while under pressure, we reduce the
6598                  * per-cpu vmstat threshold while kswapd is awake and restore
6599                  * them before going back to sleep.
6600                  */
6601                 set_pgdat_percpu_threshold(pgdat, calculate_normal_threshold);
6602
6603                 if (!kthread_should_stop())
6604                         schedule();
6605
6606                 set_pgdat_percpu_threshold(pgdat, calculate_pressure_threshold);
6607         } else {
6608                 if (remaining)
6609                         count_vm_event(KSWAPD_LOW_WMARK_HIT_QUICKLY);
6610                 else
6611                         count_vm_event(KSWAPD_HIGH_WMARK_HIT_QUICKLY);
6612         }
6613         finish_wait(&pgdat->kswapd_wait, &wait);
6614 }
6615
6616 /*
6617  * The background pageout daemon, started as a kernel thread
6618  * from the init process.
6619  *
6620  * This basically trickles out pages so that we have _some_
6621  * free memory available even if there is no other activity
6622  * that frees anything up. This is needed for things like routing
6623  * etc, where we otherwise might have all activity going on in
6624  * asynchronous contexts that cannot page things out.
6625  *
6626  * If there are applications that are active memory-allocators
6627  * (most normal use), this basically shouldn't matter.
6628  */
6629 static int kswapd(void *p)
6630 {
6631         unsigned int alloc_order, reclaim_order;
6632         unsigned int classzone_idx = MAX_NR_ZONES - 1;
6633         pg_data_t *pgdat = (pg_data_t*)p;
6634         struct task_struct *tsk = current;
6635         const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id);
6636
6637         if (!cpumask_empty(cpumask))
6638                 set_cpus_allowed_ptr(tsk, cpumask);
6639
6640         /*
6641          * Tell the memory management that we're a "memory allocator",
6642          * and that if we need more memory we should get access to it
6643          * regardless (see "__alloc_pages()"). "kswapd" should
6644          * never get caught in the normal page freeing logic.
6645          *
6646          * (Kswapd normally doesn't need memory anyway, but sometimes
6647          * you need a small amount of memory in order to be able to
6648          * page out something else, and this flag essentially protects
6649          * us from recursively trying to free more memory as we're
6650          * trying to free the first piece of memory in the first place).
6651          */
6652         tsk->flags |= PF_MEMALLOC | PF_SWAPWRITE | PF_KSWAPD;
6653         set_freezable();
6654
6655         pgdat->kswapd_order = 0;
6656         pgdat->kswapd_classzone_idx = MAX_NR_ZONES;
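        /*
         * Main loop: sleep until woken by wakeup_kswapd(), then balance the
         * node for the requested order and classzone index before trying to
         * sleep again.
         */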
6657         for ( ; ; ) {
6658                 bool ret;
6659
6660                 alloc_order = reclaim_order = pgdat->kswapd_order;
6661                 classzone_idx = kswapd_classzone_idx(pgdat, classzone_idx);
6662
6663 kswapd_try_sleep:
6664                 kswapd_try_to_sleep(pgdat, alloc_order, reclaim_order,
6665                                         classzone_idx);
6666
6667                 /* Read the new order and classzone_idx */
6668                 alloc_order = reclaim_order = pgdat->kswapd_order;
6669                 classzone_idx = kswapd_classzone_idx(pgdat, classzone_idx);
6670                 pgdat->kswapd_order = 0;
6671                 pgdat->kswapd_classzone_idx = MAX_NR_ZONES;
6672
6673                 ret = try_to_freeze();
6674                 if (kthread_should_stop())
6675                         break;
6676
6677                 /*
6678                  * We can speed up thawing tasks if we don't call balance_pgdat
6679                  * after returning from the refrigerator
6680                  */
6681                 if (ret)
6682                         continue;
6683
6684                 /*
6685                  * Reclaim begins at the requested order but if a high-order
6686                  * reclaim fails then kswapd falls back to reclaiming for
6687                  * order-0. If that happens, kswapd will consider sleeping
6688                  * for the order it finished reclaiming at (reclaim_order)
6689                  * but kcompactd is woken to compact for the original
6690                  * request (alloc_order).
6691                  */
6692                 trace_mm_vmscan_kswapd_wake(pgdat->node_id, classzone_idx,
6693                                                 alloc_order);
6694                 reclaim_order = balance_pgdat(pgdat, alloc_order, classzone_idx);
6695                 if (reclaim_order < alloc_order)
6696                         goto kswapd_try_sleep;
6697         }
6698
6699         tsk->flags &= ~(PF_MEMALLOC | PF_SWAPWRITE | PF_KSWAPD);
6700
6701         return 0;
6702 }
6703
6704 /*
6705  * A zone is low on free memory or too fragmented for high-order memory.  If
6706  * kswapd should reclaim (direct reclaim is deferred), wake it up for the zone's
6707  * pgdat.  It will wake up kcompactd after reclaiming memory.  If kswapd reclaim
6708  * has failed or is not needed, still wake up kcompactd if only compaction is
6709  * needed.
6710  */
6711 void wakeup_kswapd(struct zone *zone, gfp_t gfp_flags, int order,
6712                    enum zone_type classzone_idx)
6713 {
6714         pg_data_t *pgdat;
6715
6716         if (!managed_zone(zone))
6717                 return;
6718
6719         if (!cpuset_zone_allowed(zone, gfp_flags))
6720                 return;
6721         pgdat = zone->zone_pgdat;
6722
6723         if (pgdat->kswapd_classzone_idx == MAX_NR_ZONES)
6724                 pgdat->kswapd_classzone_idx = classzone_idx;
6725         else
6726                 pgdat->kswapd_classzone_idx = max(pgdat->kswapd_classzone_idx,
6727                                                   classzone_idx);
6728         pgdat->kswapd_order = max(pgdat->kswapd_order, order);
6729         if (!waitqueue_active(&pgdat->kswapd_wait))
6730                 return;
6731
6732         /* Hopeless node, leave it to direct reclaim if possible */
6733         if (pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES ||
6734             (pgdat_balanced(pgdat, order, classzone_idx) &&
6735              !pgdat_watermark_boosted(pgdat, classzone_idx))) {
6736                 /*
6737                  * There may be plenty of free memory available, but it's too
6738                  * fragmented for high-order allocations.  Wake up kcompactd
6739                  * and rely on compaction_suitable() to determine if it's
6740                  * needed.  If it fails, it will defer subsequent attempts to
6741                  * ratelimit its work.
6742                  */
6743                 if (!(gfp_flags & __GFP_DIRECT_RECLAIM))
6744                         wakeup_kcompactd(pgdat, order, classzone_idx);
6745                 return;
6746         }
6747
6748         trace_mm_vmscan_wakeup_kswapd(pgdat->node_id, classzone_idx, order,
6749                                       gfp_flags);
6750         wake_up_interruptible(&pgdat->kswapd_wait);
6751 }
6752
6753 #ifdef CONFIG_HIBERNATION
6754 /*
6755  * Try to free `nr_to_reclaim' pages of memory, system-wide, and return the number of
6756  * freed pages.
6757  *
6758  * Rather than trying to age LRUs, the aim is to preserve the overall
6759  * LRU order by reclaiming preferentially
6760  * inactive > active > active referenced > active mapped
6761  */
6762 unsigned long shrink_all_memory(unsigned long nr_to_reclaim)
6763 {
6764         struct scan_control sc = {
6765                 .nr_to_reclaim = nr_to_reclaim,
6766                 .gfp_mask = GFP_HIGHUSER_MOVABLE,
6767                 .reclaim_idx = MAX_NR_ZONES - 1,
6768                 .priority = DEF_PRIORITY,
6769                 .may_writepage = 1,
6770                 .may_unmap = 1,
6771                 .may_swap = 1,
6772                 .hibernation_mode = 1,
6773         };
6774         struct zonelist *zonelist = node_zonelist(numa_node_id(), sc.gfp_mask);
6775         unsigned long nr_reclaimed;
6776         unsigned int noreclaim_flag;
6777
6778         fs_reclaim_acquire(sc.gfp_mask);
6779         noreclaim_flag = memalloc_noreclaim_save();
6780         set_task_reclaim_state(current, &sc.reclaim_state);
6781
6782         nr_reclaimed = do_try_to_free_pages(zonelist, &sc);
6783
6784         set_task_reclaim_state(current, NULL);
6785         memalloc_noreclaim_restore(noreclaim_flag);
6786         fs_reclaim_release(sc.gfp_mask);
6787
6788         return nr_reclaimed;
6789 }
6790 #endif /* CONFIG_HIBERNATION */
6791
6792 /* It's optimal to keep kswapds on the same CPUs as their memory, but
6793    not required for correctness.  So if the last cpu in a node goes
6794    away, we get changed to run anywhere: as the first one comes back,
6795    restore their cpu bindings. */
6796 static int kswapd_cpu_online(unsigned int cpu)
6797 {
6798         int nid;
6799
6800         for_each_node_state(nid, N_MEMORY) {
6801                 pg_data_t *pgdat = NODE_DATA(nid);
6802                 const struct cpumask *mask;
6803
6804                 mask = cpumask_of_node(pgdat->node_id);
6805
6806                 if (cpumask_any_and(cpu_online_mask, mask) < nr_cpu_ids)
6807                         /* One of our CPUs online: restore mask */
6808                         set_cpus_allowed_ptr(pgdat->kswapd, mask);
6809         }
6810         return 0;
6811 }
6812
6813 /*
6814  * This kswapd start function will be called by init and node-hot-add.
6815  * On node-hot-add, kswapd will be moved to the proper cpus if cpus are hot-added.
6816  */
6817 int kswapd_run(int nid)
6818 {
6819         pg_data_t *pgdat = NODE_DATA(nid);
6820         int ret = 0;
6821
6822         if (pgdat->kswapd)
6823                 return 0;
6824
6825         pgdat->kswapd = kthread_run(kswapd, pgdat, "kswapd%d", nid);
6826         if (IS_ERR(pgdat->kswapd)) {
6827                 /* failure at boot is fatal */
6828                 BUG_ON(system_state < SYSTEM_RUNNING);
6829                 pr_err("Failed to start kswapd on node %d\n", nid);
6830                 ret = PTR_ERR(pgdat->kswapd);
6831                 pgdat->kswapd = NULL;
6832         }
6833         return ret;
6834 }
6835
6836 /*
6837  * Called by memory hotplug when all memory in a node is offlined.  Caller must
6838  * hold mem_hotplug_begin/end().
6839  */
6840 void kswapd_stop(int nid)
6841 {
6842         struct task_struct *kswapd = NODE_DATA(nid)->kswapd;
6843
6844         if (kswapd) {
6845                 kthread_stop(kswapd);
6846                 NODE_DATA(nid)->kswapd = NULL;
6847         }
6848 }
6849
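/*
 * Boot-time setup: start a kswapd thread for every node with memory and
 * register a CPU hotplug callback that restores kswapd's CPU affinity when
 * its node's CPUs come back online.
 */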
6850 static int __init kswapd_init(void)
6851 {
6852         int nid, ret;
6853
6854         swap_setup();
6855         for_each_node_state(nid, N_MEMORY)
6856                 kswapd_run(nid);
6857         ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
6858                                         "mm/vmscan:online", kswapd_cpu_online,
6859                                         NULL);
6860         WARN_ON(ret < 0);
6861         return 0;
6862 }
6863
6864 module_init(kswapd_init)
6865
6866 #ifdef CONFIG_NUMA
6867 /*
6868  * Node reclaim mode
6869  *
6870  * If non-zero call node_reclaim when the number of free pages falls below
6871  * the watermarks.
6872  */
6873 int node_reclaim_mode __read_mostly;
6874
6875 #define RECLAIM_OFF 0
6876 #define RECLAIM_ZONE (1<<0)     /* Run shrink_inactive_list on the zone */
6877 #define RECLAIM_WRITE (1<<1)    /* Writeout pages during reclaim */
6878 #define RECLAIM_UNMAP (1<<2)    /* Unmap pages during reclaim */
6879
6880 /*
6881  * Priority for NODE_RECLAIM. This determines the fraction of pages
6882  * of a node considered for each node_reclaim run. 4 scans 1/16th of
6883  * the node.
6884  */
6885 #define NODE_RECLAIM_PRIORITY 4
6886
6887 /*
6888  * Percentage of pages in a node that must be unmapped for node_reclaim to
6889  * occur.
6890  */
6891 int sysctl_min_unmapped_ratio = 1;
6892
6893 /*
6894  * If the number of slab pages in a node grows beyond this percentage then
6895  * slab reclaim needs to occur.
6896  */
6897 int sysctl_min_slab_ratio = 5;
6898
6899 static inline unsigned long node_unmapped_file_pages(struct pglist_data *pgdat)
6900 {
6901         unsigned long file_mapped = node_page_state(pgdat, NR_FILE_MAPPED);
6902         unsigned long file_lru = node_page_state(pgdat, NR_INACTIVE_FILE) +
6903                 node_page_state(pgdat, NR_ACTIVE_FILE);
6904
6905         /*
6906          * It's possible for there to be more file mapped pages than
6907          * accounted for by the pages on the file LRU lists because
6908          * tmpfs pages accounted for as ANON can also be FILE_MAPPED
6909          */
6910         return (file_lru > file_mapped) ? (file_lru - file_mapped) : 0;
6911 }
6912
6913 /* Work out how many page cache pages we can reclaim in this reclaim_mode */
6914 static unsigned long node_pagecache_reclaimable(struct pglist_data *pgdat)
6915 {
6916         unsigned long nr_pagecache_reclaimable;
6917         unsigned long delta = 0;
6918
6919         /*
6920          * If RECLAIM_UNMAP is set, then all file pages are considered
6921          * potentially reclaimable. Otherwise we have to worry about
6922          * pages like swapcache, and node_unmapped_file_pages() provides
6923          * a better estimate.
6924          */
6925         if (node_reclaim_mode & RECLAIM_UNMAP)
6926                 nr_pagecache_reclaimable = node_page_state(pgdat, NR_FILE_PAGES);
6927         else
6928                 nr_pagecache_reclaimable = node_unmapped_file_pages(pgdat);
6929
6930         /* If we can't clean pages, remove dirty pages from consideration */
6931         if (!(node_reclaim_mode & RECLAIM_WRITE))
6932                 delta += node_page_state(pgdat, NR_FILE_DIRTY);
6933
6934         /* Watch for any possible underflows due to delta */
6935         if (unlikely(delta > nr_pagecache_reclaimable))
6936                 delta = nr_pagecache_reclaimable;
6937
6938         return nr_pagecache_reclaimable - delta;
6939 }
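
/*
 * Editorial example for the helper above: with only RECLAIM_ZONE set, a node
 * with 10000 file LRU pages of which 4000 are mapped and 1000 are dirty
 * reports 10000 - 4000 - 1000 = 5000 reclaimable pagecache pages; with
 * RECLAIM_UNMAP it would report all of NR_FILE_PAGES, and with RECLAIM_WRITE
 * the dirty pages would not be subtracted.
 */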
6940
6941 /*
6942  * Try to free up some pages from this node through reclaim.
6943  */
6944 static int __node_reclaim(struct pglist_data *pgdat, gfp_t gfp_mask, unsigned int order)
6945 {
6946         /* Minimum pages needed in order to stay on node */
6947         const unsigned long nr_pages = 1 << order;
6948         struct task_struct *p = current;
6949         unsigned int noreclaim_flag;
6950         struct scan_control sc = {
6951                 .nr_to_reclaim = max(nr_pages, SWAP_CLUSTER_MAX),
6952                 .gfp_mask = current_gfp_context(gfp_mask),
6953                 .order = order,
6954                 .priority = NODE_RECLAIM_PRIORITY,
6955                 .may_writepage = !!(node_reclaim_mode & RECLAIM_WRITE),
6956                 .may_unmap = !!(node_reclaim_mode & RECLAIM_UNMAP),
6957                 .may_swap = 1,
6958                 .reclaim_idx = gfp_zone(gfp_mask),
6959         };
6960
6961         trace_mm_vmscan_node_reclaim_begin(pgdat->node_id, order,
6962                                            sc.gfp_mask);
6963
6964         cond_resched();
6965         fs_reclaim_acquire(sc.gfp_mask);
6966         /*
6967          * We need to be able to allocate from the reserves for RECLAIM_UNMAP,
6968          * and we also need to be able to write out pages for RECLAIM_WRITE
6969          * and RECLAIM_UNMAP.
6970          */
6971         noreclaim_flag = memalloc_noreclaim_save();
6972         p->flags |= PF_SWAPWRITE;
6973         set_task_reclaim_state(p, &sc.reclaim_state);
6974
6975         if (node_pagecache_reclaimable(pgdat) > pgdat->min_unmapped_pages) {
6976                 /*
6977                  * Free memory by calling shrink_node() with increasing reclaim
6978                  * pressure (decreasing sc.priority) until we have freed enough memory.
6979                  */
6980                 do {
6981                         shrink_node(pgdat, &sc);
6982                 } while (sc.nr_reclaimed < nr_pages && --sc.priority >= 0);
6983         }
6984
6985         set_task_reclaim_state(p, NULL);
6986         current->flags &= ~PF_SWAPWRITE;
6987         memalloc_noreclaim_restore(noreclaim_flag);
6988         fs_reclaim_release(sc.gfp_mask);
6989
6990         trace_mm_vmscan_node_reclaim_end(sc.nr_reclaimed);
6991
6992         return sc.nr_reclaimed >= nr_pages;
6993 }
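
/*
 * Editorial note: the return value above simply means "at least 1 << order
 * pages were reclaimed"; node_reclaim() below passes it through to its
 * caller and counts PGSCAN_ZONE_RECLAIM_FAILED whenever it is zero.
 */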
6994
6995 int node_reclaim(struct pglist_data *pgdat, gfp_t gfp_mask, unsigned int order)
6996 {
6997         int ret;
6998
6999         /*
7000          * Node reclaim reclaims unmapped file-backed pages and
7001          * slab pages if we are over the defined limits.
7002          *
7003          * A small portion of unmapped file-backed pages is needed for
7004          * file I/O; otherwise pages read by file I/O will be immediately
7005          * thrown out if the node is overallocated. So we do not reclaim
7006          * if less than a specified percentage of the node is used by
7007          * unmapped file-backed pages.
7008          */
7009         if (node_pagecache_reclaimable(pgdat) <= pgdat->min_unmapped_pages &&
7010             node_page_state(pgdat, NR_SLAB_RECLAIMABLE) <= pgdat->min_slab_pages)
7011                 return NODE_RECLAIM_FULL;
7012
7013         /*
7014          * Do not scan if the allocation should not be delayed.
7015          */
7016         if (!gfpflags_allow_blocking(gfp_mask) || (current->flags & PF_MEMALLOC))
7017                 return NODE_RECLAIM_NOSCAN;
7018
7019         /*
7020          * Only run node reclaim on the local node or on nodes that do not
7021          * have associated processors. This will favor the local processor
7022          * over remote processors and spread off-node memory allocations
7023          * as widely as possible.
7024          */
7025         if (node_state(pgdat->node_id, N_CPU) && pgdat->node_id != numa_node_id())
7026                 return NODE_RECLAIM_NOSCAN;
7027
7028         if (test_and_set_bit(PGDAT_RECLAIM_LOCKED, &pgdat->flags))
7029                 return NODE_RECLAIM_NOSCAN;
7030
7031         ret = __node_reclaim(pgdat, gfp_mask, order);
7032         clear_bit(PGDAT_RECLAIM_LOCKED, &pgdat->flags);
7033
7034         if (!ret)
7035                 count_vm_event(PGSCAN_ZONE_RECLAIM_FAILED);
7036
7037         return ret;
7038 }
7039 #endif
7040
7041 /*
7042  * page_evictable - test whether a page is evictable
7043  * @page: the page to test
7044  *
7045  * Test whether a page is evictable, i.e. whether it should be placed on the
7046  * active/inactive lists rather than on the unevictable list.
7047  *
7048  * Reasons page might not be evictable:
7049  * (1) page's mapping marked unevictable
7050  * (2) page is part of an mlocked VMA
7051  *
7052  */
7053 int page_evictable(struct page *page)
7054 {
7055         int ret;
7056
7057         /* Prevent address_space of inode and swap cache from being freed */
7058         rcu_read_lock();
7059         ret = !mapping_unevictable(page_mapping(page)) && !PageMlocked(page);
7060         rcu_read_unlock();
7061         return ret;
7062 }
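
/*
 * Editorial example: a ramfs page is typically unevictable for reason (1)
 * above (its mapping is marked unevictable), while an mlock()ed page is
 * unevictable for reason (2) (PageMlocked()).
 */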
7063
7064 /**
7065  * check_move_unevictable_pages - check pages for evictability and move to
7066  * appropriate zone lru list
7067  * @pvec: pagevec with lru pages to check
7068  *
7069  * Checks pages for evictability; if an evictable page is on the unevictable
7070  * lru list, it is moved to the appropriate evictable lru list. This function
7071  * should only be used for lru pages.
7072  */
7073 void check_move_unevictable_pages(struct pagevec *pvec)
7074 {
7075         struct lruvec *lruvec;
7076         struct pglist_data *pgdat = NULL;
7077         int pgscanned = 0;
7078         int pgrescued = 0;
7079         int i;
7080
7081         for (i = 0; i < pvec->nr; i++) {
7082                 struct page *page = pvec->pages[i];
7083                 struct pglist_data *pagepgdat = page_pgdat(page);
7084
7085                 pgscanned++;
7086
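                /*
                 * Editorial note: pages that are not (or are no longer) on an
                 * LRU list are skipped by the check below; clearing PG_lru
                 * before taking the lru_lock also keeps this walk from racing
                 * with concurrent isolation, and the flag is restored by
                 * SetPageLRU() further down.
                 */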
7087                 if (!TestClearPageLRU(page))
7088                         continue;
7089
7090                 if (pagepgdat != pgdat) {
7091                         if (pgdat)
7092                                 spin_unlock_irq(&pgdat->lru_lock);
7093                         pgdat = pagepgdat;
7094                         spin_lock_irq(&pgdat->lru_lock);
7095                 }
7096                 lruvec = mem_cgroup_page_lruvec(page, pgdat);
7097
7098                 if (page_evictable(page) && PageUnevictable(page)) {
7099                         enum lru_list lru = page_lru_base_type(page);
7100
7101                         VM_BUG_ON_PAGE(PageActive(page), page);
7102                         ClearPageUnevictable(page);
7103                         del_page_from_lru_list(page, lruvec, LRU_UNEVICTABLE);
7104                         add_page_to_lru_list(page, lruvec, lru);
7105                         pgrescued++;
7106                 }
7107                 SetPageLRU(page);
7108         }
7109
7110         if (pgdat) {
7111                 __count_vm_events(UNEVICTABLE_PGRESCUED, pgrescued);
7112                 __count_vm_events(UNEVICTABLE_PGSCANNED, pgscanned);
7113                 spin_unlock_irq(&pgdat->lru_lock);
7114         } else if (pgscanned) {
7115                 count_vm_events(UNEVICTABLE_PGSCANNED, pgscanned);
7116         }
7117 }
7118 EXPORT_SYMBOL_GPL(check_move_unevictable_pages);
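
/*
 * Editorial usage note: a representative in-tree caller of
 * check_move_unevictable_pages() is shmem_unlock_mapping(), which walks a
 * mapping after SHM_LOCK is dropped and hands batches of pages to this
 * helper. A minimal sketch of that calling pattern (hypothetical, for
 * illustration only, not the actual shmem implementation):
 *
 *	struct pagevec pvec;
 *	pgoff_t index = 0;
 *
 *	pagevec_init(&pvec);
 *	while (pagevec_lookup(&pvec, mapping, &index)) {
 *		check_move_unevictable_pages(&pvec);
 *		pagevec_release(&pvec);
 *		cond_resched();
 *	}
 */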