1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  *  linux/mm/vmstat.c
4  *
5  *  Manages VM statistics
6  *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
7  *
8  *  zoned VM statistics
9  *  Copyright (C) 2006 Silicon Graphics, Inc.,
10  *              Christoph Lameter <christoph@lameter.com>
11  *  Copyright (C) 2008-2014 Christoph Lameter
12  */
13 #include <linux/fs.h>
14 #include <linux/mm.h>
15 #include <linux/err.h>
16 #include <linux/module.h>
17 #include <linux/slab.h>
18 #include <linux/cpu.h>
19 #include <linux/cpumask.h>
20 #include <linux/vmstat.h>
21 #include <linux/proc_fs.h>
22 #include <linux/seq_file.h>
23 #include <linux/debugfs.h>
24 #include <linux/sched.h>
25 #include <linux/math64.h>
26 #include <linux/writeback.h>
27 #include <linux/compaction.h>
28 #include <linux/mm_inline.h>
29 #include <linux/page_ext.h>
30 #include <linux/page_owner.h>
31
32 #include "internal.h"
33
34 #define NUMA_STATS_THRESHOLD (U16_MAX - 2)
35
36 #ifdef CONFIG_NUMA
37 int sysctl_vm_numa_stat = ENABLE_NUMA_STAT;
38
39 /* zero numa counters within a zone */
40 static void zero_zone_numa_counters(struct zone *zone)
41 {
42         int item, cpu;
43
44         for (item = 0; item < NR_VM_NUMA_STAT_ITEMS; item++) {
45                 atomic_long_set(&zone->vm_numa_stat[item], 0);
46                 for_each_online_cpu(cpu)
47                         per_cpu_ptr(zone->pageset, cpu)->vm_numa_stat_diff[item]
48                                                 = 0;
49         }
50 }
51
52 /* zero numa counters of all the populated zones */
53 static void zero_zones_numa_counters(void)
54 {
55         struct zone *zone;
56
57         for_each_populated_zone(zone)
58                 zero_zone_numa_counters(zone);
59 }
60
61 /* zero global numa counters */
62 static void zero_global_numa_counters(void)
63 {
64         int item;
65
66         for (item = 0; item < NR_VM_NUMA_STAT_ITEMS; item++)
67                 atomic_long_set(&vm_numa_stat[item], 0);
68 }
69
70 static void invalid_numa_statistics(void)
71 {
72         zero_zones_numa_counters();
73         zero_global_numa_counters();
74 }
75
76 static DEFINE_MUTEX(vm_numa_stat_lock);
77
78 int sysctl_vm_numa_stat_handler(struct ctl_table *table, int write,
79                 void __user *buffer, size_t *length, loff_t *ppos)
80 {
81         int ret, oldval;
82
83         mutex_lock(&vm_numa_stat_lock);
84         if (write)
85                 oldval = sysctl_vm_numa_stat;
86         ret = proc_dointvec_minmax(table, write, buffer, length, ppos);
87         if (ret || !write)
88                 goto out;
89
90         if (oldval == sysctl_vm_numa_stat)
91                 goto out;
92         else if (sysctl_vm_numa_stat == ENABLE_NUMA_STAT) {
93                 static_branch_enable(&vm_numa_stat_key);
94                 pr_info("enable numa statistics\n");
95         } else {
96                 static_branch_disable(&vm_numa_stat_key);
97                 invalid_numa_statistics();
98                 pr_info("disable numa statistics, and clear numa counters\n");
99         }
100
101 out:
102         mutex_unlock(&vm_numa_stat_lock);
103         return ret;
104 }
105 #endif
106
107 #ifdef CONFIG_VM_EVENT_COUNTERS
108 DEFINE_PER_CPU(struct vm_event_state, vm_event_states) = {{0}};
109 EXPORT_PER_CPU_SYMBOL(vm_event_states);
110
111 static void sum_vm_events(unsigned long *ret)
112 {
113         int cpu;
114         int i;
115
116         memset(ret, 0, NR_VM_EVENT_ITEMS * sizeof(unsigned long));
117
118         for_each_online_cpu(cpu) {
119                 struct vm_event_state *this = &per_cpu(vm_event_states, cpu);
120
121                 for (i = 0; i < NR_VM_EVENT_ITEMS; i++)
122                         ret[i] += this->event[i];
123         }
124 }
125
126 /*
127  * Accumulate the vm event counters across all CPUs.
128  * The result is unavoidably approximate - it can change
129  * during and after execution of this function.
130  */
131 void all_vm_events(unsigned long *ret)
132 {
133         get_online_cpus();
134         sum_vm_events(ret);
135         put_online_cpus();
136 }
137 EXPORT_SYMBOL_GPL(all_vm_events);
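/*
 * A minimal caller sketch (hypothetical helper, not part of this file;
 * PGFAULT and PSWPIN are members of enum vm_event_item from
 * <linux/vm_event_item.h>):
 */
static inline void example_dump_vm_events(void)
{
	unsigned long events[NR_VM_EVENT_ITEMS];

	all_vm_events(events);	/* approximate snapshot across all online CPUs */
	pr_info("pgfault=%lu pswpin=%lu\n", events[PGFAULT], events[PSWPIN]);
}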
138
139 /*
140  * Fold the foreign cpu events into our own.
141  *
142  * This is adding to the events on one processor
143  * but keeps the global counts constant.
144  */
145 void vm_events_fold_cpu(int cpu)
146 {
147         struct vm_event_state *fold_state = &per_cpu(vm_event_states, cpu);
148         int i;
149
150         for (i = 0; i < NR_VM_EVENT_ITEMS; i++) {
151                 count_vm_events(i, fold_state->event[i]);
152                 fold_state->event[i] = 0;
153         }
154 }
155
156 #endif /* CONFIG_VM_EVENT_COUNTERS */
157
158 /*
159  * Manage combined zone based / global counters
160  *
161  * vm_stat contains the global counters
162  */
163 atomic_long_t vm_zone_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp;
164 atomic_long_t vm_numa_stat[NR_VM_NUMA_STAT_ITEMS] __cacheline_aligned_in_smp;
165 atomic_long_t vm_node_stat[NR_VM_NODE_STAT_ITEMS] __cacheline_aligned_in_smp;
166 EXPORT_SYMBOL(vm_zone_stat);
167 EXPORT_SYMBOL(vm_numa_stat);
168 EXPORT_SYMBOL(vm_node_stat);
169
170 #ifdef CONFIG_SMP
171
172 int calculate_pressure_threshold(struct zone *zone)
173 {
174         int threshold;
175         int watermark_distance;
176
177         /*
178          * As vmstats are not up to date, there is drift between the estimated
179          * and real values. For high thresholds and a high number of CPUs, it
180          * is possible for the min watermark to be breached while the estimated
181          * value looks fine. The pressure threshold is a reduced value such
182          * that even the maximum amount of drift will not accidentally breach
183          * the min watermark.
184          */
185         watermark_distance = low_wmark_pages(zone) - min_wmark_pages(zone);
186         threshold = max(1, (int)(watermark_distance / num_online_cpus()));
187
188         /*
189          * Maximum threshold is 125
190          */
191         threshold = min(125, threshold);
192
193         return threshold;
194 }
195
196 int calculate_normal_threshold(struct zone *zone)
197 {
198         int threshold;
199         int mem;        /* memory in 128 MB units */
200
201         /*
202          * The threshold scales with the number of processors and the amount
203          * of memory per zone. More memory means that we can defer updates for
204          * longer, while more processors could lead to more contention.
205          * fls() is used to have a cheap way of logarithmic scaling.
206          *
207          * Some sample thresholds:
208          *
209          * Threshold    Processors      (fls)   Zonesize        fls(mem+1)
210          * ------------------------------------------------------------------
211          * 8            1               1       0.9-1 GB        4
212          * 16           2               2       0.9-1 GB        4
213          * 20           2               2       1-2 GB          5
214          * 24           2               2       2-4 GB          6
215          * 28           2               2       4-8 GB          7
216          * 32           2               2       8-16 GB         8
217          * 4            2               2       <128M           1
218          * 30           4               3       2-4 GB          5
219          * 48           4               3       8-16 GB         8
220          * 32           8               4       1-2 GB          4
221          * 32           8               4       0.9-1GB         4
222          * 10           16              5       <128M           1
223          * 40           16              5       900M            4
224          * 70           64              7       2-4 GB          5
225          * 84           64              7       4-8 GB          6
226          * 108          512             9       4-8 GB          6
227          * 125          1024            10      8-16 GB         8
228          * 125          1024            10      16-32 GB        9
229          */
230
231         mem = zone_managed_pages(zone) >> (27 - PAGE_SHIFT);
232
233         threshold = 2 * fls(num_online_cpus()) * (1 + fls(mem));
234
235         /*
236          * Maximum threshold is 125
237          */
238         threshold = min(125, threshold);
239
240         return threshold;
241 }
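/*
 * Worked example (matches the 1-2 GB / 2 CPU row in the table above):
 * with 2 online CPUs, fls(2) = 2; a 1.5 GB zone has mem = 12 in 128 MB
 * units, so fls(12) = 4 and threshold = 2 * 2 * (1 + 4) = 20.
 */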
242
243 /*
244  * Refresh the thresholds for each zone.
245  */
246 void refresh_zone_stat_thresholds(void)
247 {
248         struct pglist_data *pgdat;
249         struct zone *zone;
250         int cpu;
251         int threshold;
252
253         /* Zero current pgdat thresholds */
254         for_each_online_pgdat(pgdat) {
255                 for_each_online_cpu(cpu) {
256                         per_cpu_ptr(pgdat->per_cpu_nodestats, cpu)->stat_threshold = 0;
257                 }
258         }
259
260         for_each_populated_zone(zone) {
261                 struct pglist_data *pgdat = zone->zone_pgdat;
262                 unsigned long max_drift, tolerate_drift;
263
264                 threshold = calculate_normal_threshold(zone);
265
266                 for_each_online_cpu(cpu) {
267                         int pgdat_threshold;
268
269                         per_cpu_ptr(zone->pageset, cpu)->stat_threshold
270                                                         = threshold;
271
272                         /* Base nodestat threshold on the largest populated zone. */
273                         pgdat_threshold = per_cpu_ptr(pgdat->per_cpu_nodestats, cpu)->stat_threshold;
274                         per_cpu_ptr(pgdat->per_cpu_nodestats, cpu)->stat_threshold
275                                 = max(threshold, pgdat_threshold);
276                 }
277
278                 /*
279                  * Only set percpu_drift_mark if there is a danger that
280                  * NR_FREE_PAGES reports the low watermark is ok when in fact
281                  * the min watermark could be breached by an allocation
282                  */
283                 tolerate_drift = low_wmark_pages(zone) - min_wmark_pages(zone);
284                 max_drift = num_online_cpus() * threshold;
285                 if (max_drift > tolerate_drift)
286                         zone->percpu_drift_mark = high_wmark_pages(zone) +
287                                         max_drift;
288         }
289 }
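/*
 * Worked example: with 64 online CPUs and the maximum threshold of 125,
 * the per-cpu diffs can hide up to 64 * 125 = 8000 pages. If that exceeds
 * the low-min watermark gap, percpu_drift_mark is set to
 * high_wmark_pages(zone) + 8000, flagging that the cheap counter read may
 * be unsafe near the watermarks.
 */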
290
291 void set_pgdat_percpu_threshold(pg_data_t *pgdat,
292                                 int (*calculate_pressure)(struct zone *))
293 {
294         struct zone *zone;
295         int cpu;
296         int threshold;
297         int i;
298
299         for (i = 0; i < pgdat->nr_zones; i++) {
300                 zone = &pgdat->node_zones[i];
301                 if (!zone->percpu_drift_mark)
302                         continue;
303
304                 threshold = (*calculate_pressure)(zone);
305                 for_each_online_cpu(cpu)
306                         per_cpu_ptr(zone->pageset, cpu)->stat_threshold
307                                                         = threshold;
308         }
309 }
310
311 /*
312  * For use when we know that interrupts are disabled,
313  * or when we know that preemption is disabled and that
314  * particular counter cannot be updated from interrupt context.
315  */
316 void __mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
317                            long delta)
318 {
319         struct per_cpu_pageset __percpu *pcp = zone->pageset;
320         s8 __percpu *p = pcp->vm_stat_diff + item;
321         long x;
322         long t;
323
324         x = delta + __this_cpu_read(*p);
325
326         t = __this_cpu_read(pcp->stat_threshold);
327
328         if (unlikely(x > t || x < -t)) {
329                 zone_page_state_add(x, zone, item);
330                 x = 0;
331         }
332         __this_cpu_write(*p, x);
333 }
334 EXPORT_SYMBOL(__mod_zone_page_state);
335
336 void __mod_node_page_state(struct pglist_data *pgdat, enum node_stat_item item,
337                                 long delta)
338 {
339         struct per_cpu_nodestat __percpu *pcp = pgdat->per_cpu_nodestats;
340         s8 __percpu *p = pcp->vm_node_stat_diff + item;
341         long x;
342         long t;
343
344         x = delta + __this_cpu_read(*p);
345
346         t = __this_cpu_read(pcp->stat_threshold);
347
348         if (unlikely(x > t || x < -t)) {
349                 node_page_state_add(x, pgdat, item);
350                 x = 0;
351         }
352         __this_cpu_write(*p, x);
353 }
354 EXPORT_SYMBOL(__mod_node_page_state);
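/*
 * A minimal caller sketch (hypothetical; this pattern mirrors how free-page
 * accounting is adjusted while the caller already holds zone->lock with
 * interrupts disabled):
 *
 *	spin_lock_irqsave(&zone->lock, flags);
 *	...
 *	__mod_zone_page_state(zone, NR_FREE_PAGES, 1L << order);
 *	spin_unlock_irqrestore(&zone->lock, flags);
 */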
355
356 /*
357  * Optimized increment and decrement functions.
358  *
359  * These are only for a single page and therefore can take a struct page *
360  * argument instead of struct zone *. This allows the inclusion of the code
361  * generated for page_zone(page) into the optimized functions.
362  *
363  * No overflow check is necessary and therefore the differential can be
364  * incremented or decremented in place which may allow the compilers to
365  * generate better code.
366  * The increment or decrement is known and therefore one boundary check can
367  * be omitted.
368  *
369  * NOTE: These functions are very performance sensitive. Change only
370  * with care.
371  *
372  * Some processors have inc/dec instructions that are atomic vs an interrupt.
373  * However, the code must first determine the differential location in a zone
374  * based on the processor number and then inc/dec the counter. There is no
375  * guarantee without disabling preemption that the processor will not change
376  * in between and therefore the atomicity vs. interrupt cannot be exploited
377  * in a useful way here.
378  */
379 void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
380 {
381         struct per_cpu_pageset __percpu *pcp = zone->pageset;
382         s8 __percpu *p = pcp->vm_stat_diff + item;
383         s8 v, t;
384
385         v = __this_cpu_inc_return(*p);
386         t = __this_cpu_read(pcp->stat_threshold);
387         if (unlikely(v > t)) {
388                 s8 overstep = t >> 1;
389
390                 zone_page_state_add(v + overstep, zone, item);
391                 __this_cpu_write(*p, -overstep);
392         }
393 }
394
395 void __inc_node_state(struct pglist_data *pgdat, enum node_stat_item item)
396 {
397         struct per_cpu_nodestat __percpu *pcp = pgdat->per_cpu_nodestats;
398         s8 __percpu *p = pcp->vm_node_stat_diff + item;
399         s8 v, t;
400
401         v = __this_cpu_inc_return(*p);
402         t = __this_cpu_read(pcp->stat_threshold);
403         if (unlikely(v > t)) {
404                 s8 overstep = t >> 1;
405
406                 node_page_state_add(v + overstep, pgdat, item);
407                 __this_cpu_write(*p, -overstep);
408         }
409 }
410
411 void __inc_zone_page_state(struct page *page, enum zone_stat_item item)
412 {
413         __inc_zone_state(page_zone(page), item);
414 }
415 EXPORT_SYMBOL(__inc_zone_page_state);
416
417 void __inc_node_page_state(struct page *page, enum node_stat_item item)
418 {
419         __inc_node_state(page_pgdat(page), item);
420 }
421 EXPORT_SYMBOL(__inc_node_page_state);
422
423 void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
424 {
425         struct per_cpu_pageset __percpu *pcp = zone->pageset;
426         s8 __percpu *p = pcp->vm_stat_diff + item;
427         s8 v, t;
428
429         v = __this_cpu_dec_return(*p);
430         t = __this_cpu_read(pcp->stat_threshold);
431         if (unlikely(v < -t)) {
432                 s8 overstep = t >> 1;
433
434                 zone_page_state_add(v - overstep, zone, item);
435                 __this_cpu_write(*p, overstep);
436         }
437 }
438
439 void __dec_node_state(struct pglist_data *pgdat, enum node_stat_item item)
440 {
441         struct per_cpu_nodestat __percpu *pcp = pgdat->per_cpu_nodestats;
442         s8 __percpu *p = pcp->vm_node_stat_diff + item;
443         s8 v, t;
444
445         v = __this_cpu_dec_return(*p);
446         t = __this_cpu_read(pcp->stat_threshold);
447         if (unlikely(v < -t)) {
448                 s8 overstep = t >> 1;
449
450                 node_page_state_add(v - overstep, pgdat, item);
451                 __this_cpu_write(*p, overstep);
452         }
453 }
454
455 void __dec_zone_page_state(struct page *page, enum zone_stat_item item)
456 {
457         __dec_zone_state(page_zone(page), item);
458 }
459 EXPORT_SYMBOL(__dec_zone_page_state);
460
461 void __dec_node_page_state(struct page *page, enum node_stat_item item)
462 {
463         __dec_node_state(page_pgdat(page), item);
464 }
465 EXPORT_SYMBOL(__dec_node_page_state);
466
467 #ifdef CONFIG_HAVE_CMPXCHG_LOCAL
468 /*
469  * If we have cmpxchg_local support then we do not need to incur the overhead
470  * that comes with local_irq_save/restore if we use this_cpu_cmpxchg.
471  *
472  * mod_state() modifies the zone counter state through atomic per cpu
473  * operations.
474  *
475  * Overstep mode specifies how overstep should be handled:
476  *     0       No overstepping
477  *     1       Overstepping half of threshold
478  *     -1      Overstepping minus half of threshold
479  */
480 static inline void mod_zone_state(struct zone *zone,
481        enum zone_stat_item item, long delta, int overstep_mode)
482 {
483         struct per_cpu_pageset __percpu *pcp = zone->pageset;
484         s8 __percpu *p = pcp->vm_stat_diff + item;
485         long o, n, t, z;
486
487         do {
488                 z = 0;  /* overflow to zone counters */
489
490                 /*
491                  * The fetching of the stat_threshold is racy. We may apply
492                  * a counter threshold to the wrong cpu if we get
493                  * rescheduled while executing here. However, the next
494                  * counter update will apply the threshold again and
495                  * therefore bring the counter under the threshold again.
496                  *
497                  * Most of the time the thresholds are the same anyway
498                  * for all cpus in a zone.
499                  */
500                 t = this_cpu_read(pcp->stat_threshold);
501
502                 o = this_cpu_read(*p);
503                 n = delta + o;
504
505                 if (n > t || n < -t) {
506                         int os = overstep_mode * (t >> 1);
507
508                         /* Overflow must be added to zone counters */
509                         z = n + os;
510                         n = -os;
511                 }
512         } while (this_cpu_cmpxchg(*p, o, n) != o);
513
514         if (z)
515                 zone_page_state_add(z, zone, item);
516 }
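/*
 * Worked example of the overstep handling above: with stat_threshold
 * t = 32 and overstep_mode = 1, a delta that brings the per-cpu diff to
 * n = 33 computes os = 16, folds z = 49 pages into the zone counter and
 * restarts the per-cpu diff at -16, leaving roughly half a threshold of
 * headroom before the next fold. (Values are illustrative only.)
 */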
517
518 void mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
519                          long delta)
520 {
521         mod_zone_state(zone, item, delta, 0);
522 }
523 EXPORT_SYMBOL(mod_zone_page_state);
524
525 void inc_zone_page_state(struct page *page, enum zone_stat_item item)
526 {
527         mod_zone_state(page_zone(page), item, 1, 1);
528 }
529 EXPORT_SYMBOL(inc_zone_page_state);
530
531 void dec_zone_page_state(struct page *page, enum zone_stat_item item)
532 {
533         mod_zone_state(page_zone(page), item, -1, -1);
534 }
535 EXPORT_SYMBOL(dec_zone_page_state);
536
537 static inline void mod_node_state(struct pglist_data *pgdat,
538        enum node_stat_item item, int delta, int overstep_mode)
539 {
540         struct per_cpu_nodestat __percpu *pcp = pgdat->per_cpu_nodestats;
541         s8 __percpu *p = pcp->vm_node_stat_diff + item;
542         long o, n, t, z;
543
544         do {
545                 z = 0;  /* overflow to node counters */
546
547                 /*
548                  * The fetching of the stat_threshold is racy. We may apply
549                  * a counter threshold to the wrong cpu if we get
550                  * rescheduled while executing here. However, the next
551                  * counter update will apply the threshold again and
552                  * therefore bring the counter under the threshold again.
553                  *
554                  * Most of the time the thresholds are the same anyway
555                  * for all cpus in a node.
556                  */
557                 t = this_cpu_read(pcp->stat_threshold);
558
559                 o = this_cpu_read(*p);
560                 n = delta + o;
561
562                 if (n > t || n < -t) {
563                         int os = overstep_mode * (t >> 1);
564
565                         /* Overflow must be added to node counters */
566                         z = n + os;
567                         n = -os;
568                 }
569         } while (this_cpu_cmpxchg(*p, o, n) != o);
570
571         if (z)
572                 node_page_state_add(z, pgdat, item);
573 }
574
575 void mod_node_page_state(struct pglist_data *pgdat, enum node_stat_item item,
576                                         long delta)
577 {
578         mod_node_state(pgdat, item, delta, 0);
579 }
580 EXPORT_SYMBOL(mod_node_page_state);
581
582 void inc_node_state(struct pglist_data *pgdat, enum node_stat_item item)
583 {
584         mod_node_state(pgdat, item, 1, 1);
585 }
586
587 void inc_node_page_state(struct page *page, enum node_stat_item item)
588 {
589         mod_node_state(page_pgdat(page), item, 1, 1);
590 }
591 EXPORT_SYMBOL(inc_node_page_state);
592
593 void dec_node_page_state(struct page *page, enum node_stat_item item)
594 {
595         mod_node_state(page_pgdat(page), item, -1, -1);
596 }
597 EXPORT_SYMBOL(dec_node_page_state);
598 #else
599 /*
600  * Use interrupt disable to serialize counter updates
601  */
602 void mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
603                          long delta)
604 {
605         unsigned long flags;
606
607         local_irq_save(flags);
608         __mod_zone_page_state(zone, item, delta);
609         local_irq_restore(flags);
610 }
611 EXPORT_SYMBOL(mod_zone_page_state);
612
613 void inc_zone_page_state(struct page *page, enum zone_stat_item item)
614 {
615         unsigned long flags;
616         struct zone *zone;
617
618         zone = page_zone(page);
619         local_irq_save(flags);
620         __inc_zone_state(zone, item);
621         local_irq_restore(flags);
622 }
623 EXPORT_SYMBOL(inc_zone_page_state);
624
625 void dec_zone_page_state(struct page *page, enum zone_stat_item item)
626 {
627         unsigned long flags;
628
629         local_irq_save(flags);
630         __dec_zone_page_state(page, item);
631         local_irq_restore(flags);
632 }
633 EXPORT_SYMBOL(dec_zone_page_state);
634
635 void inc_node_state(struct pglist_data *pgdat, enum node_stat_item item)
636 {
637         unsigned long flags;
638
639         local_irq_save(flags);
640         __inc_node_state(pgdat, item);
641         local_irq_restore(flags);
642 }
643 EXPORT_SYMBOL(inc_node_state);
644
645 void mod_node_page_state(struct pglist_data *pgdat, enum node_stat_item item,
646                                         long delta)
647 {
648         unsigned long flags;
649
650         local_irq_save(flags);
651         __mod_node_page_state(pgdat, item, delta);
652         local_irq_restore(flags);
653 }
654 EXPORT_SYMBOL(mod_node_page_state);
655
656 void inc_node_page_state(struct page *page, enum node_stat_item item)
657 {
658         unsigned long flags;
659         struct pglist_data *pgdat;
660
661         pgdat = page_pgdat(page);
662         local_irq_save(flags);
663         __inc_node_state(pgdat, item);
664         local_irq_restore(flags);
665 }
666 EXPORT_SYMBOL(inc_node_page_state);
667
668 void dec_node_page_state(struct page *page, enum node_stat_item item)
669 {
670         unsigned long flags;
671
672         local_irq_save(flags);
673         __dec_node_page_state(page, item);
674         local_irq_restore(flags);
675 }
676 EXPORT_SYMBOL(dec_node_page_state);
677 #endif
678
679 /*
680  * Fold a differential into the global counters.
681  * Returns the number of counters updated.
682  */
683 #ifdef CONFIG_NUMA
684 static int fold_diff(int *zone_diff, int *numa_diff, int *node_diff)
685 {
686         int i;
687         int changes = 0;
688
689         for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
690                 if (zone_diff[i]) {
691                         atomic_long_add(zone_diff[i], &vm_zone_stat[i]);
692                         changes++;
693         }
694
695         for (i = 0; i < NR_VM_NUMA_STAT_ITEMS; i++)
696                 if (numa_diff[i]) {
697                         atomic_long_add(numa_diff[i], &vm_numa_stat[i]);
698                         changes++;
699         }
700
701         for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++)
702                 if (node_diff[i]) {
703                         atomic_long_add(node_diff[i], &vm_node_stat[i]);
704                         changes++;
705         }
706         return changes;
707 }
708 #else
709 static int fold_diff(int *zone_diff, int *node_diff)
710 {
711         int i;
712         int changes = 0;
713
714         for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
715                 if (zone_diff[i]) {
716                         atomic_long_add(zone_diff[i], &vm_zone_stat[i]);
717                         changes++;
718         }
719
720         for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++)
721                 if (node_diff[i]) {
722                         atomic_long_add(node_diff[i], &vm_node_stat[i]);
723                         changes++;
724         }
725         return changes;
726 }
727 #endif /* CONFIG_NUMA */
728
729 /*
730  * Update the zone counters for the current cpu.
731  *
732  * Note that refresh_cpu_vm_stats strives to only access
733  * node local memory. The per cpu pagesets on remote zones are placed
734  * in the memory local to the processor using that pageset. So the
735  * loop over all zones will access a series of cachelines local to
736  * the processor.
737  *
738  * The call to zone_page_state_add updates the cachelines with the
739  * statistics in the remote zone struct as well as the global cachelines
740  * with the global counters. These could cause remote node cache line
741  * bouncing and will have to be done only when necessary.
742  *
743  * The function returns the number of global counters updated.
744  */
745 static int refresh_cpu_vm_stats(bool do_pagesets)
746 {
747         struct pglist_data *pgdat;
748         struct zone *zone;
749         int i;
750         int global_zone_diff[NR_VM_ZONE_STAT_ITEMS] = { 0, };
751 #ifdef CONFIG_NUMA
752         int global_numa_diff[NR_VM_NUMA_STAT_ITEMS] = { 0, };
753 #endif
754         int global_node_diff[NR_VM_NODE_STAT_ITEMS] = { 0, };
755         int changes = 0;
756
757         for_each_populated_zone(zone) {
758                 struct per_cpu_pageset __percpu *p = zone->pageset;
759
760                 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++) {
761                         int v;
762
763                         v = this_cpu_xchg(p->vm_stat_diff[i], 0);
764                         if (v) {
765
766                                 atomic_long_add(v, &zone->vm_stat[i]);
767                                 global_zone_diff[i] += v;
768 #ifdef CONFIG_NUMA
769                                 /* 3 seconds idle till flush */
770                                 __this_cpu_write(p->expire, 3);
771 #endif
772                         }
773                 }
774 #ifdef CONFIG_NUMA
775                 for (i = 0; i < NR_VM_NUMA_STAT_ITEMS; i++) {
776                         int v;
777
778                         v = this_cpu_xchg(p->vm_numa_stat_diff[i], 0);
779                         if (v) {
780
781                                 atomic_long_add(v, &zone->vm_numa_stat[i]);
782                                 global_numa_diff[i] += v;
783                                 __this_cpu_write(p->expire, 3);
784                         }
785                 }
786
787                 if (do_pagesets) {
788                         cond_resched();
789                         /*
790                          * Deal with draining the remote pageset of this
791                          * processor
792                          *
793                          * Check if there are pages remaining in this pageset;
794                          * if not, then there is nothing to expire.
795                          */
796                         if (!__this_cpu_read(p->expire) ||
797                                !__this_cpu_read(p->pcp.count))
798                                 continue;
799
800                         /*
801                          * We never drain zones local to this processor.
802                          */
803                         if (zone_to_nid(zone) == numa_node_id()) {
804                                 __this_cpu_write(p->expire, 0);
805                                 continue;
806                         }
807
808                         if (__this_cpu_dec_return(p->expire))
809                                 continue;
810
811                         if (__this_cpu_read(p->pcp.count)) {
812                                 drain_zone_pages(zone, this_cpu_ptr(&p->pcp));
813                                 changes++;
814                         }
815                 }
816 #endif
817         }
818
819         for_each_online_pgdat(pgdat) {
820                 struct per_cpu_nodestat __percpu *p = pgdat->per_cpu_nodestats;
821
822                 for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++) {
823                         int v;
824
825                         v = this_cpu_xchg(p->vm_node_stat_diff[i], 0);
826                         if (v) {
827                                 atomic_long_add(v, &pgdat->vm_stat[i]);
828                                 global_node_diff[i] += v;
829                         }
830                 }
831         }
832
833 #ifdef CONFIG_NUMA
834         changes += fold_diff(global_zone_diff, global_numa_diff,
835                              global_node_diff);
836 #else
837         changes += fold_diff(global_zone_diff, global_node_diff);
838 #endif
839         return changes;
840 }
841
842 /*
843  * Fold the data for an offline cpu into the global array.
844  * There cannot be any access by the offline cpu and therefore
845  * synchronization is simplified.
846  */
847 void cpu_vm_stats_fold(int cpu)
848 {
849         struct pglist_data *pgdat;
850         struct zone *zone;
851         int i;
852         int global_zone_diff[NR_VM_ZONE_STAT_ITEMS] = { 0, };
853 #ifdef CONFIG_NUMA
854         int global_numa_diff[NR_VM_NUMA_STAT_ITEMS] = { 0, };
855 #endif
856         int global_node_diff[NR_VM_NODE_STAT_ITEMS] = { 0, };
857
858         for_each_populated_zone(zone) {
859                 struct per_cpu_pageset *p;
860
861                 p = per_cpu_ptr(zone->pageset, cpu);
862
863                 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
864                         if (p->vm_stat_diff[i]) {
865                                 int v;
866
867                                 v = p->vm_stat_diff[i];
868                                 p->vm_stat_diff[i] = 0;
869                                 atomic_long_add(v, &zone->vm_stat[i]);
870                                 global_zone_diff[i] += v;
871                         }
872
873 #ifdef CONFIG_NUMA
874                 for (i = 0; i < NR_VM_NUMA_STAT_ITEMS; i++)
875                         if (p->vm_numa_stat_diff[i]) {
876                                 int v;
877
878                                 v = p->vm_numa_stat_diff[i];
879                                 p->vm_numa_stat_diff[i] = 0;
880                                 atomic_long_add(v, &zone->vm_numa_stat[i]);
881                                 global_numa_diff[i] += v;
882                         }
883 #endif
884         }
885
886         for_each_online_pgdat(pgdat) {
887                 struct per_cpu_nodestat *p;
888
889                 p = per_cpu_ptr(pgdat->per_cpu_nodestats, cpu);
890
891                 for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++)
892                         if (p->vm_node_stat_diff[i]) {
893                                 int v;
894
895                                 v = p->vm_node_stat_diff[i];
896                                 p->vm_node_stat_diff[i] = 0;
897                                 atomic_long_add(v, &pgdat->vm_stat[i]);
898                                 global_node_diff[i] += v;
899                         }
900         }
901
902 #ifdef CONFIG_NUMA
903         fold_diff(global_zone_diff, global_numa_diff, global_node_diff);
904 #else
905         fold_diff(global_zone_diff, global_node_diff);
906 #endif
907 }
908
909 /*
910  * This is only called if !populated_zone(zone), which implies no other users of
911  * pset->vm_stat_diff[] exist.
912  */
913 void drain_zonestat(struct zone *zone, struct per_cpu_pageset *pset)
914 {
915         int i;
916
917         for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
918                 if (pset->vm_stat_diff[i]) {
919                         int v = pset->vm_stat_diff[i];
920                         pset->vm_stat_diff[i] = 0;
921                         atomic_long_add(v, &zone->vm_stat[i]);
922                         atomic_long_add(v, &vm_zone_stat[i]);
923                 }
924
925 #ifdef CONFIG_NUMA
926         for (i = 0; i < NR_VM_NUMA_STAT_ITEMS; i++)
927                 if (pset->vm_numa_stat_diff[i]) {
928                         int v = pset->vm_numa_stat_diff[i];
929
930                         pset->vm_numa_stat_diff[i] = 0;
931                         atomic_long_add(v, &zone->vm_numa_stat[i]);
932                         atomic_long_add(v, &vm_numa_stat[i]);
933                 }
934 #endif
935 }
936 #endif
937
938 #ifdef CONFIG_NUMA
939 void __inc_numa_state(struct zone *zone,
940                                  enum numa_stat_item item)
941 {
942         struct per_cpu_pageset __percpu *pcp = zone->pageset;
943         u16 __percpu *p = pcp->vm_numa_stat_diff + item;
944         u16 v;
945
946         v = __this_cpu_inc_return(*p);
947
948         if (unlikely(v > NUMA_STATS_THRESHOLD)) {
949                 zone_numa_state_add(v, zone, item);
950                 __this_cpu_write(*p, 0);
951         }
952 }
953
954 /*
955  * Determine the per node value of a stat item. This function
956  * is called frequently in a NUMA machine, so try to be as
957  * frugal as possible.
958  */
959 unsigned long sum_zone_node_page_state(int node,
960                                  enum zone_stat_item item)
961 {
962         struct zone *zones = NODE_DATA(node)->node_zones;
963         int i;
964         unsigned long count = 0;
965
966         for (i = 0; i < MAX_NR_ZONES; i++)
967                 count += zone_page_state(zones + i, item);
968
969         return count;
970 }
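/*
 * An illustrative caller (hypothetical; NR_FREE_PAGES is a zone_stat_item),
 * e.g. reporting a node's free memory in KiB:
 *
 *	unsigned long free_kb = sum_zone_node_page_state(nid, NR_FREE_PAGES)
 *					<< (PAGE_SHIFT - 10);
 */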
971
972 /*
973  * Determine the per node value of a numa stat item. To avoid deviation,
974  * the per cpu stat number in vm_numa_stat_diff[] is also included.
975  */
976 unsigned long sum_zone_numa_state(int node,
977                                  enum numa_stat_item item)
978 {
979         struct zone *zones = NODE_DATA(node)->node_zones;
980         int i;
981         unsigned long count = 0;
982
983         for (i = 0; i < MAX_NR_ZONES; i++)
984                 count += zone_numa_state_snapshot(zones + i, item);
985
986         return count;
987 }
988
989 /*
990  * Determine the per node value of a stat item.
991  */
992 unsigned long node_page_state(struct pglist_data *pgdat,
993                                 enum node_stat_item item)
994 {
995         long x = atomic_long_read(&pgdat->vm_stat[item]);
996 #ifdef CONFIG_SMP
997         if (x < 0)
998                 x = 0;
999 #endif
1000         return x;
1001 }
1002 #endif
1003
1004 #ifdef CONFIG_COMPACTION
1005
1006 struct contig_page_info {
1007         unsigned long free_pages;
1008         unsigned long free_blocks_total;
1009         unsigned long free_blocks_suitable;
1010 };
1011
1012 /*
1013  * Calculate the number of free pages in a zone, how many contiguous
1014  * pages are free and how many are large enough to satisfy an allocation of
1015  * the target size. Note that this function makes no attempt to estimate
1016  * how many suitable free blocks there *might* be if MOVABLE pages were
1017  * migrated. Calculating that is possible, but expensive and can be
1018  * figured out from userspace.
1019  */
1020 static void fill_contig_page_info(struct zone *zone,
1021                                 unsigned int suitable_order,
1022                                 struct contig_page_info *info)
1023 {
1024         unsigned int order;
1025
1026         info->free_pages = 0;
1027         info->free_blocks_total = 0;
1028         info->free_blocks_suitable = 0;
1029
1030         for (order = 0; order < MAX_ORDER; order++) {
1031                 unsigned long blocks;
1032
1033                 /* Count number of free blocks */
1034                 blocks = zone->free_area[order].nr_free;
1035                 info->free_blocks_total += blocks;
1036
1037                 /* Count free base pages */
1038                 info->free_pages += blocks << order;
1039
1040                 /* Count the suitable free blocks */
1041                 if (order >= suitable_order)
1042                         info->free_blocks_suitable += blocks <<
1043                                                 (order - suitable_order);
1044         }
1045 }
1046
1047 /*
1048  * A fragmentation index only makes sense if an allocation of a requested
1049  * size would fail. If that is true, the fragmentation index indicates
1050  * whether external fragmentation or a lack of memory was the problem.
1051  * The value can be used to determine if page reclaim or compaction
1052  * should be used.
1053  */
1054 static int __fragmentation_index(unsigned int order, struct contig_page_info *info)
1055 {
1056         unsigned long requested = 1UL << order;
1057
1058         if (WARN_ON_ONCE(order >= MAX_ORDER))
1059                 return 0;
1060
1061         if (!info->free_blocks_total)
1062                 return 0;
1063
1064         /* Fragmentation index only makes sense when a request would fail */
1065         if (info->free_blocks_suitable)
1066                 return -1000;
1067
1068         /*
1069          * Index is between 0 and 1 so return within 3 decimal places
1070          *
1071          * 0 => allocation would fail due to lack of memory
1072          * 1 => allocation would fail due to fragmentation
1073          */
1074         return 1000 - div_u64((1000 + div_u64(info->free_pages * 1000ULL, requested)), info->free_blocks_total);
1075 }
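/*
 * Worked example: for an order-3 request (requested = 8) against a zone
 * with free_pages = 800, free_blocks_total = 200 and no suitable blocks,
 * the index is 1000 - (1000 + 800 * 1000 / 8) / 200 = 1000 - 505 = 495,
 * i.e. about 0.495: the failure is roughly equally attributable to lack
 * of memory and to external fragmentation. (Values are illustrative only.)
 */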
1076
1077 /* Same as __fragmentation_index but allocates contig_page_info on the stack */
1078 int fragmentation_index(struct zone *zone, unsigned int order)
1079 {
1080         struct contig_page_info info;
1081
1082         fill_contig_page_info(zone, order, &info);
1083         return __fragmentation_index(order, &info);
1084 }
1085 #endif
1086
1087 #if defined(CONFIG_PROC_FS) || defined(CONFIG_SYSFS) || \
1088     defined(CONFIG_NUMA) || defined(CONFIG_MEMCG)
1089 #ifdef CONFIG_ZONE_DMA
1090 #define TEXT_FOR_DMA(xx) xx "_dma",
1091 #else
1092 #define TEXT_FOR_DMA(xx)
1093 #endif
1094
1095 #ifdef CONFIG_ZONE_DMA32
1096 #define TEXT_FOR_DMA32(xx) xx "_dma32",
1097 #else
1098 #define TEXT_FOR_DMA32(xx)
1099 #endif
1100
1101 #ifdef CONFIG_HIGHMEM
1102 #define TEXT_FOR_HIGHMEM(xx) xx "_high",
1103 #else
1104 #define TEXT_FOR_HIGHMEM(xx)
1105 #endif
1106
1107 #define TEXTS_FOR_ZONES(xx) TEXT_FOR_DMA(xx) TEXT_FOR_DMA32(xx) xx "_normal", \
1108                                         TEXT_FOR_HIGHMEM(xx) xx "_movable",
1109
1110 const char * const vmstat_text[] = {
1111         /* enum zone_stat_item counters */
1112         "nr_free_pages",
1113         "nr_zone_inactive_anon",
1114         "nr_zone_active_anon",
1115         "nr_zone_inactive_file",
1116         "nr_zone_active_file",
1117         "nr_zone_unevictable",
1118         "nr_zone_write_pending",
1119         "nr_mlock",
1120         "nr_page_table_pages",
1121         "nr_kernel_stack",
1122         "nr_bounce",
1123 #if IS_ENABLED(CONFIG_ZSMALLOC)
1124         "nr_zspages",
1125 #endif
1126         "nr_free_cma",
1127
1128         /* enum numa_stat_item counters */
1129 #ifdef CONFIG_NUMA
1130         "numa_hit",
1131         "numa_miss",
1132         "numa_foreign",
1133         "numa_interleave",
1134         "numa_local",
1135         "numa_other",
1136 #endif
1137
1138         /* enum node_stat_item counters */
1139         "nr_inactive_anon",
1140         "nr_active_anon",
1141         "nr_inactive_file",
1142         "nr_active_file",
1143         "nr_unevictable",
1144         "nr_slab_reclaimable",
1145         "nr_slab_unreclaimable",
1146         "nr_isolated_anon",
1147         "nr_isolated_file",
1148         "workingset_nodes",
1149         "workingset_refault",
1150         "workingset_activate",
1151         "workingset_restore",
1152         "workingset_nodereclaim",
1153         "nr_anon_pages",
1154         "nr_mapped",
1155         "nr_file_pages",
1156         "nr_dirty",
1157         "nr_writeback",
1158         "nr_writeback_temp",
1159         "nr_shmem",
1160         "nr_shmem_hugepages",
1161         "nr_shmem_pmdmapped",
1162         "nr_file_hugepages",
1163         "nr_file_pmdmapped",
1164         "nr_anon_transparent_hugepages",
1165         "nr_unstable",
1166         "nr_vmscan_write",
1167         "nr_vmscan_immediate_reclaim",
1168         "nr_dirtied",
1169         "nr_written",
1170         "nr_kernel_misc_reclaimable",
1171
1172         /* enum writeback_stat_item counters */
1173         "nr_dirty_threshold",
1174         "nr_dirty_background_threshold",
1175
1176 #if defined(CONFIG_VM_EVENT_COUNTERS) || defined(CONFIG_MEMCG)
1177         /* enum vm_event_item counters */
1178         "pgpgin",
1179         "pgpgout",
1180         "pswpin",
1181         "pswpout",
1182
1183         TEXTS_FOR_ZONES("pgalloc")
1184         TEXTS_FOR_ZONES("allocstall")
1185         TEXTS_FOR_ZONES("pgskip")
1186
1187         "pgfree",
1188         "pgactivate",
1189         "pgdeactivate",
1190         "pglazyfree",
1191
1192         "pgfault",
1193         "pgmajfault",
1194         "pglazyfreed",
1195
1196         "pgrefill",
1197         "pgsteal_kswapd",
1198         "pgsteal_direct",
1199         "pgscan_kswapd",
1200         "pgscan_direct",
1201         "pgscan_direct_throttle",
1202
1203 #ifdef CONFIG_NUMA
1204         "zone_reclaim_failed",
1205 #endif
1206         "pginodesteal",
1207         "slabs_scanned",
1208         "kswapd_inodesteal",
1209         "kswapd_low_wmark_hit_quickly",
1210         "kswapd_high_wmark_hit_quickly",
1211         "pageoutrun",
1212
1213         "pgrotated",
1214
1215         "drop_pagecache",
1216         "drop_slab",
1217         "oom_kill",
1218
1219 #ifdef CONFIG_NUMA_BALANCING
1220         "numa_pte_updates",
1221         "numa_huge_pte_updates",
1222         "numa_hint_faults",
1223         "numa_hint_faults_local",
1224         "numa_pages_migrated",
1225 #endif
1226 #ifdef CONFIG_MIGRATION
1227         "pgmigrate_success",
1228         "pgmigrate_fail",
1229 #endif
1230 #ifdef CONFIG_COMPACTION
1231         "compact_migrate_scanned",
1232         "compact_free_scanned",
1233         "compact_isolated",
1234         "compact_stall",
1235         "compact_fail",
1236         "compact_success",
1237         "compact_daemon_wake",
1238         "compact_daemon_migrate_scanned",
1239         "compact_daemon_free_scanned",
1240 #endif
1241
1242 #ifdef CONFIG_HUGETLB_PAGE
1243         "htlb_buddy_alloc_success",
1244         "htlb_buddy_alloc_fail",
1245 #endif
1246         "unevictable_pgs_culled",
1247         "unevictable_pgs_scanned",
1248         "unevictable_pgs_rescued",
1249         "unevictable_pgs_mlocked",
1250         "unevictable_pgs_munlocked",
1251         "unevictable_pgs_cleared",
1252         "unevictable_pgs_stranded",
1253
1254 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
1255         "thp_fault_alloc",
1256         "thp_fault_fallback",
1257         "thp_collapse_alloc",
1258         "thp_collapse_alloc_failed",
1259         "thp_file_alloc",
1260         "thp_file_mapped",
1261         "thp_split_page",
1262         "thp_split_page_failed",
1263         "thp_deferred_split_page",
1264         "thp_split_pmd",
1265 #ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
1266         "thp_split_pud",
1267 #endif
1268         "thp_zero_page_alloc",
1269         "thp_zero_page_alloc_failed",
1270         "thp_swpout",
1271         "thp_swpout_fallback",
1272 #endif
1273 #ifdef CONFIG_MEMORY_BALLOON
1274         "balloon_inflate",
1275         "balloon_deflate",
1276 #ifdef CONFIG_BALLOON_COMPACTION
1277         "balloon_migrate",
1278 #endif
1279 #endif /* CONFIG_MEMORY_BALLOON */
1280 #ifdef CONFIG_DEBUG_TLBFLUSH
1281         "nr_tlb_remote_flush",
1282         "nr_tlb_remote_flush_received",
1283         "nr_tlb_local_flush_all",
1284         "nr_tlb_local_flush_one",
1285 #endif /* CONFIG_DEBUG_TLBFLUSH */
1286
1287 #ifdef CONFIG_DEBUG_VM_VMACACHE
1288         "vmacache_find_calls",
1289         "vmacache_find_hits",
1290 #endif
1291 #ifdef CONFIG_SWAP
1292         "swap_ra",
1293         "swap_ra_hit",
1294 #endif
1295 #endif /* CONFIG_VM_EVENT_COUNTERS || CONFIG_MEMCG */
1296 };
1297 #endif /* CONFIG_PROC_FS || CONFIG_SYSFS || CONFIG_NUMA || CONFIG_MEMCG */
1298
1299 #if (defined(CONFIG_DEBUG_FS) && defined(CONFIG_COMPACTION)) || \
1300      defined(CONFIG_PROC_FS)
1301 static void *frag_start(struct seq_file *m, loff_t *pos)
1302 {
1303         pg_data_t *pgdat;
1304         loff_t node = *pos;
1305
1306         for (pgdat = first_online_pgdat();
1307              pgdat && node;
1308              pgdat = next_online_pgdat(pgdat))
1309                 --node;
1310
1311         return pgdat;
1312 }
1313
1314 static void *frag_next(struct seq_file *m, void *arg, loff_t *pos)
1315 {
1316         pg_data_t *pgdat = (pg_data_t *)arg;
1317
1318         (*pos)++;
1319         return next_online_pgdat(pgdat);
1320 }
1321
1322 static void frag_stop(struct seq_file *m, void *arg)
1323 {
1324 }
1325
1326 /*
1327  * Walk zones in a node and print using a callback.
1328  * If @assert_populated is true, only use callback for zones that are populated.
1329  */
1330 static void walk_zones_in_node(struct seq_file *m, pg_data_t *pgdat,
1331                 bool assert_populated, bool nolock,
1332                 void (*print)(struct seq_file *m, pg_data_t *, struct zone *))
1333 {
1334         struct zone *zone;
1335         struct zone *node_zones = pgdat->node_zones;
1336         unsigned long flags;
1337
1338         for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; ++zone) {
1339                 if (assert_populated && !populated_zone(zone))
1340                         continue;
1341
1342                 if (!nolock)
1343                         spin_lock_irqsave(&zone->lock, flags);
1344                 print(m, pgdat, zone);
1345                 if (!nolock)
1346                         spin_unlock_irqrestore(&zone->lock, flags);
1347         }
1348 }
1349 #endif
1350
1351 #ifdef CONFIG_PROC_FS
1352 static void frag_show_print(struct seq_file *m, pg_data_t *pgdat,
1353                                                 struct zone *zone)
1354 {
1355         int order;
1356
1357         seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name);
1358         for (order = 0; order < MAX_ORDER; ++order)
1359                 seq_printf(m, "%6lu ", zone->free_area[order].nr_free);
1360         seq_putc(m, '\n');
1361 }
1362
1363 /*
1364  * This walks the free areas for each zone.
1365  */
1366 static int frag_show(struct seq_file *m, void *arg)
1367 {
1368         pg_data_t *pgdat = (pg_data_t *)arg;
1369         walk_zones_in_node(m, pgdat, true, false, frag_show_print);
1370         return 0;
1371 }
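/*
 * An illustrative /proc/buddyinfo line produced by frag_show_print()
 * above (counts are made up; one column per order up to MAX_ORDER - 1):
 *
 *	Node 0, zone   Normal    145     62     33     17      9      5      3      2      1      1      0
 */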
1372
1373 static void pagetypeinfo_showfree_print(struct seq_file *m,
1374                                         pg_data_t *pgdat, struct zone *zone)
1375 {
1376         int order, mtype;
1377
1378         for (mtype = 0; mtype < MIGRATE_TYPES; mtype++) {
1379                 seq_printf(m, "Node %4d, zone %8s, type %12s ",
1380                                         pgdat->node_id,
1381                                         zone->name,
1382                                         migratetype_names[mtype]);
1383                 for (order = 0; order < MAX_ORDER; ++order) {
1384                         unsigned long freecount = 0;
1385                         struct free_area *area;
1386                         struct list_head *curr;
1387                         bool overflow = false;
1388
1389                         area = &(zone->free_area[order]);
1390
1391                         list_for_each(curr, &area->free_list[mtype]) {
1392                                 /*
1393                                  * Cap the free_list iteration because it might
1394                                  * be really large and we are under a spinlock
1395                                  * so a long time spent here could trigger a
1396                                  * hard lockup detector. Anyway this is a
1397                                  * debugging tool so knowing there is a handful
1398                                  * of pages of this order should be more than
1399                                  * sufficient.
1400                                  */
1401                                 if (++freecount >= 100000) {
1402                                         overflow = true;
1403                                         break;
1404                                 }
1405                         }
1406                         seq_printf(m, "%s%6lu ", overflow ? ">" : "", freecount);
1407                         spin_unlock_irq(&zone->lock);
1408                         cond_resched();
1409                         spin_lock_irq(&zone->lock);
1410                 }
1411                 seq_putc(m, '\n');
1412         }
1413 }
1414
1415 /* Print out the free pages at each order for each migratetype */
1416 static int pagetypeinfo_showfree(struct seq_file *m, void *arg)
1417 {
1418         int order;
1419         pg_data_t *pgdat = (pg_data_t *)arg;
1420
1421         /* Print header */
1422         seq_printf(m, "%-43s ", "Free pages count per migrate type at order");
1423         for (order = 0; order < MAX_ORDER; ++order)
1424                 seq_printf(m, "%6d ", order);
1425         seq_putc(m, '\n');
1426
1427         walk_zones_in_node(m, pgdat, true, false, pagetypeinfo_showfree_print);
1428
1429         return 0;
1430 }
1431
1432 static void pagetypeinfo_showblockcount_print(struct seq_file *m,
1433                                         pg_data_t *pgdat, struct zone *zone)
1434 {
1435         int mtype;
1436         unsigned long pfn;
1437         unsigned long start_pfn = zone->zone_start_pfn;
1438         unsigned long end_pfn = zone_end_pfn(zone);
1439         unsigned long count[MIGRATE_TYPES] = { 0, };
1440
1441         for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
1442                 struct page *page;
1443
1444                 page = pfn_to_online_page(pfn);
1445                 if (!page)
1446                         continue;
1447
1448                 /* Watch for unexpected holes punched in the memmap */
1449                 if (!memmap_valid_within(pfn, page, zone))
1450                         continue;
1451
1452                 if (page_zone(page) != zone)
1453                         continue;
1454
1455                 mtype = get_pageblock_migratetype(page);
1456
1457                 if (mtype < MIGRATE_TYPES)
1458                         count[mtype]++;
1459         }
1460
1461         /* Print counts */
1462         seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name);
1463         for (mtype = 0; mtype < MIGRATE_TYPES; mtype++)
1464                 seq_printf(m, "%12lu ", count[mtype]);
1465         seq_putc(m, '\n');
1466 }
1467
1468 /* Print out the number of pageblocks for each migratetype */
1469 static int pagetypeinfo_showblockcount(struct seq_file *m, void *arg)
1470 {
1471         int mtype;
1472         pg_data_t *pgdat = (pg_data_t *)arg;
1473
1474         seq_printf(m, "\n%-23s", "Number of blocks type ");
1475         for (mtype = 0; mtype < MIGRATE_TYPES; mtype++)
1476                 seq_printf(m, "%12s ", migratetype_names[mtype]);
1477         seq_putc(m, '\n');
1478         walk_zones_in_node(m, pgdat, true, false,
1479                 pagetypeinfo_showblockcount_print);
1480
1481         return 0;
1482 }
1483
1484 /*
1485  * Print out the number of pageblocks for each migratetype that contain pages
1486  * of other types. This gives an indication of how well fallbacks are being
1487  * contained by rmqueue_fallback(). It requires information from PAGE_OWNER
1488  * to determine what is going on
1489  */
1490 static void pagetypeinfo_showmixedcount(struct seq_file *m, pg_data_t *pgdat)
1491 {
1492 #ifdef CONFIG_PAGE_OWNER
1493         int mtype;
1494
1495         if (!static_branch_unlikely(&page_owner_inited))
1496                 return;
1497
1498         drain_all_pages(NULL);
1499
1500         seq_printf(m, "\n%-23s", "Number of mixed blocks ");
1501         for (mtype = 0; mtype < MIGRATE_TYPES; mtype++)
1502                 seq_printf(m, "%12s ", migratetype_names[mtype]);
1503         seq_putc(m, '\n');
1504
1505         walk_zones_in_node(m, pgdat, true, true,
1506                 pagetypeinfo_showmixedcount_print);
1507 #endif /* CONFIG_PAGE_OWNER */
1508 }
1509
1510 /*
1511  * This prints out statistics in relation to grouping pages by mobility.
1512  * It is expensive to collect so do not constantly read the file.
1513  */
1514 static int pagetypeinfo_show(struct seq_file *m, void *arg)
1515 {
1516         pg_data_t *pgdat = (pg_data_t *)arg;
1517
1518         /* check memoryless node */
1519         if (!node_state(pgdat->node_id, N_MEMORY))
1520                 return 0;
1521
1522         seq_printf(m, "Page block order: %d\n", pageblock_order);
1523         seq_printf(m, "Pages per block:  %lu\n", pageblock_nr_pages);
1524         seq_putc(m, '\n');
1525         pagetypeinfo_showfree(m, pgdat);
1526         pagetypeinfo_showblockcount(m, pgdat);
1527         pagetypeinfo_showmixedcount(m, pgdat);
1528
1529         return 0;
1530 }
1531
1532 static const struct seq_operations fragmentation_op = {
1533         .start  = frag_start,
1534         .next   = frag_next,
1535         .stop   = frag_stop,
1536         .show   = frag_show,
1537 };
1538
1539 static const struct seq_operations pagetypeinfo_op = {
1540         .start  = frag_start,
1541         .next   = frag_next,
1542         .stop   = frag_stop,
1543         .show   = pagetypeinfo_show,
1544 };
1545
1546 static bool is_zone_first_populated(pg_data_t *pgdat, struct zone *zone)
1547 {
1548         int zid;
1549
1550         for (zid = 0; zid < MAX_NR_ZONES; zid++) {
1551                 struct zone *compare = &pgdat->node_zones[zid];
1552
1553                 if (populated_zone(compare))
1554                         return zone == compare;
1555         }
1556
1557         return false;
1558 }
1559
1560 static void zoneinfo_show_print(struct seq_file *m, pg_data_t *pgdat,
1561                                                         struct zone *zone)
1562 {
1563         int i;
1564         seq_printf(m, "Node %d, zone %8s", pgdat->node_id, zone->name);
1565         if (is_zone_first_populated(pgdat, zone)) {
1566                 seq_printf(m, "\n  per-node stats");
1567                 for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++) {
1568                         seq_printf(m, "\n      %-12s %lu", node_stat_name(i),
1569                                    node_page_state(pgdat, i));
1570                 }
1571         }
1572         seq_printf(m,
1573                    "\n  pages free     %lu"
1574                    "\n        min      %lu"
1575                    "\n        low      %lu"
1576                    "\n        high     %lu"
1577                    "\n        spanned  %lu"
1578                    "\n        present  %lu"
1579                    "\n        managed  %lu",
1580                    zone_page_state(zone, NR_FREE_PAGES),
1581                    min_wmark_pages(zone),
1582                    low_wmark_pages(zone),
1583                    high_wmark_pages(zone),
1584                    zone->spanned_pages,
1585                    zone->present_pages,
1586                    zone_managed_pages(zone));
1587
1588         seq_printf(m,
1589                    "\n        protection: (%ld",
1590                    zone->lowmem_reserve[0]);
1591         for (i = 1; i < ARRAY_SIZE(zone->lowmem_reserve); i++)
1592                 seq_printf(m, ", %ld", zone->lowmem_reserve[i]);
1593         seq_putc(m, ')');
1594
1595         /* If unpopulated, no other information is useful */
1596         if (!populated_zone(zone)) {
1597                 seq_putc(m, '\n');
1598                 return;
1599         }
1600
1601         for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
1602                 seq_printf(m, "\n      %-12s %lu", zone_stat_name(i),
1603                            zone_page_state(zone, i));
1604
1605 #ifdef CONFIG_NUMA
1606         for (i = 0; i < NR_VM_NUMA_STAT_ITEMS; i++)
1607                 seq_printf(m, "\n      %-12s %lu", numa_stat_name(i),
1608                            zone_numa_state_snapshot(zone, i));
1609 #endif
1610
1611         seq_printf(m, "\n  pagesets");
1612         for_each_online_cpu(i) {
1613                 struct per_cpu_pageset *pageset;
1614
1615                 pageset = per_cpu_ptr(zone->pageset, i);
1616                 seq_printf(m,
1617                            "\n    cpu: %i"
1618                            "\n              count: %i"
1619                            "\n              high:  %i"
1620                            "\n              batch: %i",
1621                            i,
1622                            pageset->pcp.count,
1623                            pageset->pcp.high,
1624                            pageset->pcp.batch);
1625 #ifdef CONFIG_SMP
1626                 seq_printf(m, "\n  vm stats threshold: %d",
1627                                 pageset->stat_threshold);
1628 #endif
1629         }
1630         seq_printf(m,
1631                    "\n  node_unreclaimable:  %u"
1632                    "\n  start_pfn:           %lu",
1633                    pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES,
1634                    zone->zone_start_pfn);
1635         seq_putc(m, '\n');
1636 }
1637
1638 /*
1639  * Output information about zones in @pgdat.  All zones are printed regardless
1640  * of whether they are populated or not: lowmem_reserve_ratio operates on the
1641  * set of all zones and userspace would not be aware of such zones if they are
1642  * suppressed here (zoneinfo displays the effect of lowmem_reserve_ratio).
1643  */
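/*
 * For example (illustrative numbers), even a zone with no present pages
 * still gets a line such as
 *
 *     protection: (0, 0, 1024, 1024)
 *
 * where, roughly, the i-th entry is how many pages of this zone are held
 * in reserve against allocations that could also have been satisfied from
 * zone index i - the effect of lowmem_reserve_ratio that userspace tools
 * inspect.
 */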
1644 static int zoneinfo_show(struct seq_file *m, void *arg)
1645 {
1646         pg_data_t *pgdat = (pg_data_t *)arg;
1647         walk_zones_in_node(m, pgdat, false, false, zoneinfo_show_print);
1648         return 0;
1649 }
1650
1651 static const struct seq_operations zoneinfo_op = {
1652         .start  = frag_start, /* iterate over all zones, using the same
1653                                * iterator as fragmentation_op */
1654         .next   = frag_next,
1655         .stop   = frag_stop,
1656         .show   = zoneinfo_show,
1657 };
1658
1659 #define NR_VMSTAT_ITEMS (NR_VM_ZONE_STAT_ITEMS + \
1660                          NR_VM_NUMA_STAT_ITEMS + \
1661                          NR_VM_NODE_STAT_ITEMS + \
1662                          NR_VM_WRITEBACK_STAT_ITEMS + \
1663                          (IS_ENABLED(CONFIG_VM_EVENT_COUNTERS) ? \
1664                           NR_VM_EVENT_ITEMS : 0))
1665
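/*
 * Assemble a one-shot snapshot of every counter exported through
 * /proc/vmstat: zone stats, NUMA stats (when configured), node stats and
 * the dirty/writeback thresholds are copied into a single kmalloc'ed
 * array, followed by the VM event counters.  vmstat_next()/vmstat_show()
 * then walk that array, one "name value" line per item.
 */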
1666 static void *vmstat_start(struct seq_file *m, loff_t *pos)
1667 {
1668         unsigned long *v;
1669         int i;
1670
1671         if (*pos >= NR_VMSTAT_ITEMS)
1672                 return NULL;
1673
1674         BUILD_BUG_ON(ARRAY_SIZE(vmstat_text) < NR_VMSTAT_ITEMS);
1675         v = kmalloc_array(NR_VMSTAT_ITEMS, sizeof(unsigned long), GFP_KERNEL);
1676         m->private = v;
1677         if (!v)
1678                 return ERR_PTR(-ENOMEM);
1679         for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
1680                 v[i] = global_zone_page_state(i);
1681         v += NR_VM_ZONE_STAT_ITEMS;
1682
1683 #ifdef CONFIG_NUMA
1684         for (i = 0; i < NR_VM_NUMA_STAT_ITEMS; i++)
1685                 v[i] = global_numa_state(i);
1686         v += NR_VM_NUMA_STAT_ITEMS;
1687 #endif
1688
1689         for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++)
1690                 v[i] = global_node_page_state(i);
1691         v += NR_VM_NODE_STAT_ITEMS;
1692
1693         global_dirty_limits(v + NR_DIRTY_BG_THRESHOLD,
1694                             v + NR_DIRTY_THRESHOLD);
1695         v += NR_VM_WRITEBACK_STAT_ITEMS;
1696
1697 #ifdef CONFIG_VM_EVENT_COUNTERS
1698         all_vm_events(v);
1699         v[PGPGIN] /= 2;         /* sectors -> kbytes */
1700         v[PGPGOUT] /= 2;
1701 #endif
1702         return (unsigned long *)m->private + *pos;
1703 }
1704
1705 static void *vmstat_next(struct seq_file *m, void *arg, loff_t *pos)
1706 {
1707         (*pos)++;
1708         if (*pos >= NR_VMSTAT_ITEMS)
1709                 return NULL;
1710         return (unsigned long *)m->private + *pos;
1711 }
1712
1713 static int vmstat_show(struct seq_file *m, void *arg)
1714 {
1715         unsigned long *l = arg;
1716         unsigned long off = l - (unsigned long *)m->private;
1717
1718         seq_puts(m, vmstat_text[off]);
1719         seq_put_decimal_ull(m, " ", *l);
1720         seq_putc(m, '\n');
1721         return 0;
1722 }
1723
1724 static void vmstat_stop(struct seq_file *m, void *arg)
1725 {
1726         kfree(m->private);
1727         m->private = NULL;
1728 }
1729
1730 static const struct seq_operations vmstat_op = {
1731         .start  = vmstat_start,
1732         .next   = vmstat_next,
1733         .stop   = vmstat_stop,
1734         .show   = vmstat_show,
1735 };
1736 #endif /* CONFIG_PROC_FS */
1737
1738 #ifdef CONFIG_SMP
1739 static DEFINE_PER_CPU(struct delayed_work, vmstat_work);
1740 int sysctl_stat_interval __read_mostly = HZ;
1741
1742 #ifdef CONFIG_PROC_FS
1743 static void refresh_vm_stats(struct work_struct *work)
1744 {
1745         refresh_cpu_vm_stats(true);
1746 }
1747
1748 int vmstat_refresh(struct ctl_table *table, int write,
1749                    void __user *buffer, size_t *lenp, loff_t *ppos)
1750 {
1751         long val;
1752         int err;
1753         int i;
1754
1755         /*
1756          * The regular update, every sysctl_stat_interval, may come later
1757          * than expected: leaving a significant amount in per_cpu buckets.
1758          * This is particularly misleading when checking a quantity of HUGE
1759          * pages, immediately after running a test.  /proc/sys/vm/stat_refresh,
1760          * which can equally be echo'ed to or cat'ted from (by root),
1761          * can be used to update the stats just before reading them.
1762          *
1763          * Oh, and since global_zone_page_state() etc. are so careful to hide
1764          * transiently negative values, report an error here if any of
1765          * the stats is negative, so we know to go looking for imbalance.
1766          */
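        /*
         * Illustrative usage from userspace (counter name and value are
         * just examples):
         *
         *     # echo 1 > /proc/sys/vm/stat_refresh
         *     # grep nr_free_pages /proc/vmstat
         *
         * The write folds the per-cpu diffs so that the following read of
         * /proc/vmstat reflects up-to-date global counts.
         */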
1767         err = schedule_on_each_cpu(refresh_vm_stats);
1768         if (err)
1769                 return err;
1770         for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++) {
1771                 val = atomic_long_read(&vm_zone_stat[i]);
1772                 if (val < 0) {
1773                         pr_warn("%s: %s %ld\n",
1774                                 __func__, zone_stat_name(i), val);
1775                         err = -EINVAL;
1776                 }
1777         }
1778 #ifdef CONFIG_NUMA
1779         for (i = 0; i < NR_VM_NUMA_STAT_ITEMS; i++) {
1780                 val = atomic_long_read(&vm_numa_stat[i]);
1781                 if (val < 0) {
1782                         pr_warn("%s: %s %ld\n",
1783                                 __func__, numa_stat_name(i), val);
1784                         err = -EINVAL;
1785                 }
1786         }
1787 #endif
1788         if (err)
1789                 return err;
1790         if (write)
1791                 *ppos += *lenp;
1792         else
1793                 *lenp = 0;
1794         return 0;
1795 }
1796 #endif /* CONFIG_PROC_FS */
1797
1798 static void vmstat_update(struct work_struct *w)
1799 {
1800         if (refresh_cpu_vm_stats(true)) {
1801                 /*
1802                  * Counters were updated so we expect more updates
1803                  * to occur in the future. Keep on running the
1804                  * update worker thread.
1805                  */
1806                 queue_delayed_work_on(smp_processor_id(), mm_percpu_wq,
1807                                 this_cpu_ptr(&vmstat_work),
1808                                 round_jiffies_relative(sysctl_stat_interval));
1809         }
1810 }
1811
1817 /*
1818  * Check if the diffs for a certain cpu indicate that
1819  * an update is needed.
1820  */
1821 static bool need_update(int cpu)
1822 {
1823         struct zone *zone;
1824
1825         for_each_populated_zone(zone) {
1826                 struct per_cpu_pageset *p = per_cpu_ptr(zone->pageset, cpu);
1827
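                /*
                 * The per-cpu zone diffs are stored as s8 and (with
                 * CONFIG_NUMA) the NUMA diffs as u16; assert those sizes
                 * at build time before scanning the arrays as raw bytes
                 * below.
                 */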
1828                 BUILD_BUG_ON(sizeof(p->vm_stat_diff[0]) != 1);
1829 #ifdef CONFIG_NUMA
1830                 BUILD_BUG_ON(sizeof(p->vm_numa_stat_diff[0]) != 2);
1831 #endif
1832
1833                 /*
1834                  * The fast way of checking if there are any vmstat diffs.
1835                  */
1836                 if (memchr_inv(p->vm_stat_diff, 0, NR_VM_ZONE_STAT_ITEMS *
1837                                sizeof(p->vm_stat_diff[0])))
1838                         return true;
1839 #ifdef CONFIG_NUMA
1840                 if (memchr_inv(p->vm_numa_stat_diff, 0, NR_VM_NUMA_STAT_ITEMS *
1841                                sizeof(p->vm_numa_stat_diff[0])))
1842                         return true;
1843 #endif
1844         }
1845         return false;
1846 }
1847
1848 /*
1849  * Switch off vmstat processing and then fold all the remaining differentials
1850  * until the diffs stay at zero. The function is used by NOHZ and can only be
1851  * invoked when tick processing is not active.
1852  */
1853 void quiet_vmstat(void)
1854 {
1855         if (system_state != SYSTEM_RUNNING)
1856                 return;
1857
1858         if (!delayed_work_pending(this_cpu_ptr(&vmstat_work)))
1859                 return;
1860
1861         if (!need_update(smp_processor_id()))
1862                 return;
1863
1864         /*
1865          * Just refresh counters and do not care about the pending delayed
1866          * vmstat_update. It does not fire often enough to matter, and
1867          * cancelling it from this path would be too expensive.
1868          * vmstat_shepherd will take care of it for us.
1869          */
1870         refresh_cpu_vm_stats(false);
1871 }
1872
1873 /*
1874  * Shepherd worker that checks the per-cpu differentials of
1875  * processors whose vmstat update workers have been switched off
1876  * because of inactivity, and re-queues the per-cpu work wherever
1877  * updates are pending.
1878  */
1879 static void vmstat_shepherd(struct work_struct *w);
1880
1881 static DECLARE_DEFERRABLE_WORK(shepherd, vmstat_shepherd);
1882
1883 static void vmstat_shepherd(struct work_struct *w)
1884 {
1885         int cpu;
1886
1887         get_online_cpus();
1888         /* Check processors whose vmstat worker threads have been disabled */
1889         for_each_online_cpu(cpu) {
1890                 struct delayed_work *dw = &per_cpu(vmstat_work, cpu);
1891
1892                 if (!delayed_work_pending(dw) && need_update(cpu))
1893                         queue_delayed_work_on(cpu, mm_percpu_wq, dw, 0);
1894         }
1895         put_online_cpus();
1896
1897         schedule_delayed_work(&shepherd,
1898                 round_jiffies_relative(sysctl_stat_interval));
1899 }
1900
1901 static void __init start_shepherd_timer(void)
1902 {
1903         int cpu;
1904
1905         for_each_possible_cpu(cpu)
1906                 INIT_DEFERRABLE_WORK(per_cpu_ptr(&vmstat_work, cpu),
1907                         vmstat_update);
1908
1909         schedule_delayed_work(&shepherd,
1910                 round_jiffies_relative(sysctl_stat_interval));
1911 }
1912
1913 static void __init init_cpu_node_state(void)
1914 {
1915         int node;
1916
1917         for_each_online_node(node) {
1918                 if (cpumask_weight(cpumask_of_node(node)) > 0)
1919                         node_set_state(node, N_CPU);
1920         }
1921 }
1922
1923 static int vmstat_cpu_online(unsigned int cpu)
1924 {
1925         refresh_zone_stat_thresholds();
1926         node_set_state(cpu_to_node(cpu), N_CPU);
1927         return 0;
1928 }
1929
1930 static int vmstat_cpu_down_prep(unsigned int cpu)
1931 {
1932         cancel_delayed_work_sync(&per_cpu(vmstat_work, cpu));
1933         return 0;
1934 }
1935
1936 static int vmstat_cpu_dead(unsigned int cpu)
1937 {
1938         const struct cpumask *node_cpus;
1939         int node;
1940
1941         node = cpu_to_node(cpu);
1942
1943         refresh_zone_stat_thresholds();
1944         node_cpus = cpumask_of_node(node);
1945         if (cpumask_weight(node_cpus) > 0)
1946                 return 0;
1947
1948         node_clear_state(node, N_CPU);
1949         return 0;
1950 }
1951
1952 #endif
1953
1954 struct workqueue_struct *mm_percpu_wq;
1955
1956 void __init init_mm_internals(void)
1957 {
1958         int ret __maybe_unused;
1959
1960         mm_percpu_wq = alloc_workqueue("mm_percpu_wq", WQ_MEM_RECLAIM, 0);
1961
1962 #ifdef CONFIG_SMP
1963         ret = cpuhp_setup_state_nocalls(CPUHP_MM_VMSTAT_DEAD, "mm/vmstat:dead",
1964                                         NULL, vmstat_cpu_dead);
1965         if (ret < 0)
1966                 pr_err("vmstat: failed to register 'dead' hotplug state\n");
1967
1968         ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "mm/vmstat:online",
1969                                         vmstat_cpu_online,
1970                                         vmstat_cpu_down_prep);
1971         if (ret < 0)
1972                 pr_err("vmstat: failed to register 'online' hotplug state\n");
1973
1974         get_online_cpus();
1975         init_cpu_node_state();
1976         put_online_cpus();
1977
1978         start_shepherd_timer();
1979 #endif
1980 #ifdef CONFIG_PROC_FS
1981         proc_create_seq("buddyinfo", 0444, NULL, &fragmentation_op);
1982         proc_create_seq("pagetypeinfo", 0400, NULL, &pagetypeinfo_op);
1983         proc_create_seq("vmstat", 0444, NULL, &vmstat_op);
1984         proc_create_seq("zoneinfo", 0444, NULL, &zoneinfo_op);
1985 #endif
1986 }
1987
1988 #if defined(CONFIG_DEBUG_FS) && defined(CONFIG_COMPACTION)
1989
1990 /*
1991  * Return an index indicating how much of the available free memory is
1992  * unusable for an allocation of the requested size.
1993  */
1994 static int unusable_free_index(unsigned int order,
1995                                 struct contig_page_info *info)
1996 {
1997         /* Treat no free memory as all of the free memory being unusable */
1998         if (info->free_pages == 0)
1999                 return 1000;
2000
2001         /*
2002          * The index is conceptually a value between 0 and 1; it is
2003          * returned scaled by 1000 so callers get three decimal places.
2004          *
2005          * 0 => no fragmentation (all free memory lies in large enough blocks)
2006          * 1 => high fragmentation (none of the free memory is usable)
2007          */
2008         return div_u64((info->free_pages - (info->free_blocks_suitable << order)) * 1000ULL, info->free_pages);
2009
2010 }
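/*
 * Worked example with made-up numbers: for an order-7 request with
 * info->free_pages = 1000 and info->free_blocks_suitable = 2, only
 * 2 << 7 = 256 of the 1000 free pages sit in blocks large enough for the
 * allocation, so the index is (1000 - 256) * 1000 / 1000 = 744, which
 * unusable_show_print() below renders as "0.744".
 */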
2011
2012 static void unusable_show_print(struct seq_file *m,
2013                                         pg_data_t *pgdat, struct zone *zone)
2014 {
2015         unsigned int order;
2016         int index;
2017         struct contig_page_info info;
2018
2019         seq_printf(m, "Node %d, zone %8s ",
2020                                 pgdat->node_id,
2021                                 zone->name);
2022         for (order = 0; order < MAX_ORDER; ++order) {
2023                 fill_contig_page_info(zone, order, &info);
2024                 index = unusable_free_index(order, &info);
2025                 seq_printf(m, "%d.%03d ", index / 1000, index % 1000);
2026         }
2027
2028         seq_putc(m, '\n');
2029 }
2030
2031 /*
2032  * Display unusable free space index
2033  *
2034  * The unusable free space index measures how much of the available free
2035  * memory cannot be used to satisfy an allocation of a given size and is a
2036  * value between 0 and 1. The higher the value, the more of the free memory
2037  * is unusable and, by implication, the worse the external fragmentation is.
2038  * This can be expressed as a percentage by multiplying by 100.
2039  */
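/*
 * For instance, an index of 0.250 at a given order means roughly 25% of
 * the currently free memory cannot back an allocation of that order.
 */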
2040 static int unusable_show(struct seq_file *m, void *arg)
2041 {
2042         pg_data_t *pgdat = (pg_data_t *)arg;
2043
2044         /* check memoryless node */
2045         if (!node_state(pgdat->node_id, N_MEMORY))
2046                 return 0;
2047
2048         walk_zones_in_node(m, pgdat, true, false, unusable_show_print);
2049
2050         return 0;
2051 }
2052
2053 static const struct seq_operations unusable_op = {
2054         .start  = frag_start,
2055         .next   = frag_next,
2056         .stop   = frag_stop,
2057         .show   = unusable_show,
2058 };
2059
2060 static int unusable_open(struct inode *inode, struct file *file)
2061 {
2062         return seq_open(file, &unusable_op);
2063 }
2064
2065 static const struct file_operations unusable_file_ops = {
2066         .open           = unusable_open,
2067         .read           = seq_read,
2068         .llseek         = seq_lseek,
2069         .release        = seq_release,
2070 };
2071
2072 static void extfrag_show_print(struct seq_file *m,
2073                                         pg_data_t *pgdat, struct zone *zone)
2074 {
2075         unsigned int order;
2076         int index;
2077
2078         /* Alloc on stack as interrupts are disabled for zone walk */
2079         struct contig_page_info info;
2080
2081         seq_printf(m, "Node %d, zone %8s ",
2082                                 pgdat->node_id,
2083                                 zone->name);
2084         for (order = 0; order < MAX_ORDER; ++order) {
2085                 fill_contig_page_info(zone, order, &info);
2086                 index = __fragmentation_index(order, &info);
2087                 seq_printf(m, "%d.%03d ", index / 1000, index % 1000);
2088         }
2089
2090         seq_putc(m, '\n');
2091 }
2092
2093 /*
2094  * Display fragmentation index for orders that allocations would fail for
2095  */
2096 static int extfrag_show(struct seq_file *m, void *arg)
2097 {
2098         pg_data_t *pgdat = (pg_data_t *)arg;
2099
2100         walk_zones_in_node(m, pgdat, true, false, extfrag_show_print);
2101
2102         return 0;
2103 }
2104
2105 static const struct seq_operations extfrag_op = {
2106         .start  = frag_start,
2107         .next   = frag_next,
2108         .stop   = frag_stop,
2109         .show   = extfrag_show,
2110 };
2111
2112 static int extfrag_open(struct inode *inode, struct file *file)
2113 {
2114         return seq_open(file, &extfrag_op);
2115 }
2116
2117 static const struct file_operations extfrag_file_ops = {
2118         .open           = extfrag_open,
2119         .read           = seq_read,
2120         .llseek         = seq_lseek,
2121         .release        = seq_release,
2122 };
2123
2124 static int __init extfrag_debug_init(void)
2125 {
2126         struct dentry *extfrag_debug_root;
2127
2128         extfrag_debug_root = debugfs_create_dir("extfrag", NULL);
2129
2130         debugfs_create_file("unusable_index", 0444, extfrag_debug_root, NULL,
2131                             &unusable_file_ops);
2132
2133         debugfs_create_file("extfrag_index", 0444, extfrag_debug_root, NULL,
2134                             &extfrag_file_ops);
2135
2136         return 0;
2137 }
2138
2139 module_init(extfrag_debug_init);
2140 #endif