#ifndef _LINUX_VMSTAT_H
#define _LINUX_VMSTAT_H

#include <linux/types.h>
#include <linux/percpu.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <asm/atomic.h>
#ifdef CONFIG_ZONE_DMA
#define DMA_ZONE(xx) xx##_DMA,
#else
#define DMA_ZONE(xx)
#endif

#ifdef CONFIG_ZONE_DMA32
#define DMA32_ZONE(xx) xx##_DMA32,
#else
#define DMA32_ZONE(xx)
#endif

#ifdef CONFIG_HIGHMEM
#define HIGHMEM_ZONE(xx) , xx##_HIGH
#else
#define HIGHMEM_ZONE(xx)
#endif

#define FOR_ALL_ZONES(xx) DMA_ZONE(xx) DMA32_ZONE(xx) xx##_NORMAL HIGHMEM_ZONE(xx) , xx##_MOVABLE
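
/*
 * Illustrative expansion (a sketch, assuming a configuration with
 * CONFIG_ZONE_DMA, CONFIG_ZONE_DMA32 and CONFIG_HIGHMEM all enabled):
 *
 *	FOR_ALL_ZONES(PGALLOC)
 *
 * expands to
 *
 *	PGALLOC_DMA, PGALLOC_DMA32, PGALLOC_NORMAL, PGALLOC_HIGH, PGALLOC_MOVABLE
 *
 * so each configured zone gets its own consecutively numbered event item,
 * in zone order.
 */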

enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
		FOR_ALL_ZONES(PGALLOC),
		PGFREE, PGACTIVATE, PGDEACTIVATE,
		PGFAULT, PGMAJFAULT,
		FOR_ALL_ZONES(PGREFILL),
		FOR_ALL_ZONES(PGSTEAL),
		FOR_ALL_ZONES(PGSCAN_KSWAPD),
		FOR_ALL_ZONES(PGSCAN_DIRECT),
#ifdef CONFIG_NUMA
		PGSCAN_ZONE_RECLAIM_FAILED,
#endif
		PGINODESTEAL, SLABS_SCANNED, KSWAPD_STEAL, KSWAPD_INODESTEAL,
		KSWAPD_LOW_WMARK_HIT_QUICKLY, KSWAPD_HIGH_WMARK_HIT_QUICKLY,
		KSWAPD_SKIP_CONGESTION_WAIT,
		PAGEOUTRUN, ALLOCSTALL, PGROTATED,
#ifdef CONFIG_COMPACTION
		COMPACTBLOCKS, COMPACTPAGES, COMPACTPAGEFAILED,
		COMPACTSTALL, COMPACTFAIL, COMPACTSUCCESS,
#endif
#ifdef CONFIG_HUGETLB_PAGE
		HTLB_BUDDY_PGALLOC, HTLB_BUDDY_PGALLOC_FAIL,
#endif
		UNEVICTABLE_PGCULLED,	/* culled to noreclaim list */
		UNEVICTABLE_PGSCANNED,	/* scanned for reclaimability */
		UNEVICTABLE_PGRESCUED,	/* rescued from noreclaim list */
		UNEVICTABLE_PGMLOCKED,
		UNEVICTABLE_PGMUNLOCKED,
		UNEVICTABLE_PGCLEARED,	/* on COW, page truncate */
		UNEVICTABLE_PGSTRANDED,	/* unable to isolate on unlock */
		UNEVICTABLE_MLOCKFREED,
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
		THP_FAULT_ALLOC,
		THP_FAULT_FALLBACK,
		THP_COLLAPSE_ALLOC,
		THP_COLLAPSE_ALLOC_FAILED,
		THP_SPLIT,
#endif
		NR_VM_EVENT_ITEMS
};

extern int sysctl_stat_interval;

#ifdef CONFIG_VM_EVENT_COUNTERS
/*
 * Lightweight per cpu counter implementation.
 *
 * Counters should only be incremented and no critical kernel component
 * should rely on the counter values.
 *
 * Counters are handled completely inline. On many platforms the code
 * generated will simply be the increment of a global address.
 */
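
/*
 * Usage sketch (illustrative only): callers simply bump an event from
 * wherever it occurs, e.g. on swap-in
 *
 *	count_vm_event(PSWPIN);
 *
 * or account a whole batch in one go
 *
 *	count_vm_events(PGPGIN, nr_pages);
 *
 * where nr_pages stands for whatever the caller just transferred.
 */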

struct vm_event_state {
	unsigned long event[NR_VM_EVENT_ITEMS];
};

DECLARE_PER_CPU(struct vm_event_state, vm_event_states);

static inline void __count_vm_event(enum vm_event_item item)
{
	__this_cpu_inc(vm_event_states.event[item]);
}

static inline void count_vm_event(enum vm_event_item item)
{
	this_cpu_inc(vm_event_states.event[item]);
}

static inline void __count_vm_events(enum vm_event_item item, long delta)
{
	__this_cpu_add(vm_event_states.event[item], delta);
}

static inline void count_vm_events(enum vm_event_item item, long delta)
{
	this_cpu_add(vm_event_states.event[item], delta);
}
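
/*
 * The __count_* variants use the non-irq-safe __this_cpu operations and
 * are therefore only safe when the caller already keeps the task on one
 * cpu, e.g. (a sketch, not taken from any particular call site):
 *
 *	preempt_disable();
 *	__count_vm_event(PGACTIVATE);
 *	preempt_enable();
 *
 * count_vm_event()/count_vm_events() may be used from any context.
 */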

extern void all_vm_events(unsigned long *);
#ifdef CONFIG_HOTPLUG
extern void vm_events_fold_cpu(int cpu);
#else
static inline void vm_events_fold_cpu(int cpu)
{
}
#endif

#else

/* Disable counters */
static inline void count_vm_event(enum vm_event_item item)
{
}
static inline void count_vm_events(enum vm_event_item item, long delta)
{
}
static inline void __count_vm_event(enum vm_event_item item)
{
}
static inline void __count_vm_events(enum vm_event_item item, long delta)
{
}
static inline void all_vm_events(unsigned long *ret)
{
}
static inline void vm_events_fold_cpu(int cpu)
{
}

#endif /* CONFIG_VM_EVENT_COUNTERS */

#define __count_zone_vm_events(item, zone, delta) \
		__count_vm_events(item##_NORMAL - ZONE_NORMAL + \
		zone_idx(zone), delta)
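
/*
 * Worked example (illustrative): because FOR_ALL_ZONES() lays the event
 * items out in zone order, item##_NORMAL - ZONE_NORMAL is the index of
 * the first item of that block, so
 *
 *	__count_zone_vm_events(PGREFILL, zone, 1)
 *
 * bumps PGREFILL_DMA, PGREFILL_NORMAL, PGREFILL_HIGH, ... according to
 * zone_idx(zone).
 */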

/*
 * Zone based page accounting with per cpu differentials.
 */
extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];

static inline void zone_page_state_add(long x, struct zone *zone,
				 enum zone_stat_item item)
{
	atomic_long_add(x, &zone->vm_stat[item]);
	atomic_long_add(x, &vm_stat[item]);
}
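
/*
 * Sketch (illustrative): folding a per-cpu delta of, say, +3 free pages
 * into the authoritative counters updates the zone-local and the global
 * copy together:
 *
 *	zone_page_state_add(3, zone, NR_FREE_PAGES);
 */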

static inline unsigned long global_page_state(enum zone_stat_item item)
{
	long x = atomic_long_read(&vm_stat[item]);
#ifdef CONFIG_SMP
	/* Pending per-cpu deltas may leave the sum transiently negative */
	if (x < 0)
		x = 0;
#endif
	return x;
}

static inline unsigned long zone_page_state(struct zone *zone,
					enum zone_stat_item item)
{
	long x = atomic_long_read(&zone->vm_stat[item]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

/*
 * More accurate version that also takes the currently pending per-cpu
 * deltas into account. Finding those requires a loop over all cpus;
 * since there is no synchronization, the result is still not exact.
 */
static inline unsigned long zone_page_state_snapshot(struct zone *zone,
					enum zone_stat_item item)
{
	long x = atomic_long_read(&zone->vm_stat[item]);

#ifdef CONFIG_SMP
	int cpu;
	for_each_online_cpu(cpu)
		x += per_cpu_ptr(zone->pageset, cpu)->vm_stat_diff[item];

	if (x < 0)
		x = 0;
#endif
	return x;
}
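
/*
 * Sketch (illustrative): a watermark check that must not be fooled by
 * pending per-cpu deltas would use the snapshot variant, accepting the
 * cost of the cpu loop:
 *
 *	bool pressured = zone_page_state_snapshot(zone, NR_FREE_PAGES) <
 *				min_wmark_pages(zone);
 */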

extern unsigned long global_reclaimable_pages(void);
extern unsigned long zone_reclaimable_pages(struct zone *zone);

#ifdef CONFIG_NUMA
/*
 * Determine the per node value of a stat item. This function
 * is called frequently on a NUMA machine, so try to be as
 * frugal as possible.
 */
static inline unsigned long node_page_state(int node,
				 enum zone_stat_item item)
{
	struct zone *zones = NODE_DATA(node)->node_zones;

	return
#ifdef CONFIG_ZONE_DMA
		zone_page_state(&zones[ZONE_DMA], item) +
#endif
#ifdef CONFIG_ZONE_DMA32
		zone_page_state(&zones[ZONE_DMA32], item) +
#endif
#ifdef CONFIG_HIGHMEM
		zone_page_state(&zones[ZONE_HIGHMEM], item) +
#endif
		zone_page_state(&zones[ZONE_NORMAL], item) +
		zone_page_state(&zones[ZONE_MOVABLE], item);
}
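
/*
 * Sketch (illustrative): summing a stat item over the local node, e.g.
 * the node-wide page cache count:
 *
 *	unsigned long nr = node_page_state(numa_node_id(), NR_FILE_PAGES);
 */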

extern void zone_statistics(struct zone *, struct zone *, gfp_t gfp);

#else

#define node_page_state(node, item) global_page_state(item)
#define zone_statistics(_zl, _z, gfp) do { } while (0)

#endif /* CONFIG_NUMA */

#define add_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, __d)
#define sub_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, -(__d))

static inline void zap_zone_vm_stats(struct zone *zone)
{
	memset(zone->vm_stat, 0, sizeof(zone->vm_stat));
}

extern void inc_zone_state(struct zone *, enum zone_stat_item);

#ifdef CONFIG_SMP
void __mod_zone_page_state(struct zone *, enum zone_stat_item item, int);
void __inc_zone_page_state(struct page *, enum zone_stat_item);
void __dec_zone_page_state(struct page *, enum zone_stat_item);

void mod_zone_page_state(struct zone *, enum zone_stat_item, int);
void inc_zone_page_state(struct page *, enum zone_stat_item);
void dec_zone_page_state(struct page *, enum zone_stat_item);

extern void inc_zone_state(struct zone *, enum zone_stat_item);
extern void __inc_zone_state(struct zone *, enum zone_stat_item);
extern void dec_zone_state(struct zone *, enum zone_stat_item);
extern void __dec_zone_state(struct zone *, enum zone_stat_item);

void refresh_cpu_vm_stats(int);

int calculate_pressure_threshold(struct zone *zone);
int calculate_normal_threshold(struct zone *zone);
void set_pgdat_percpu_threshold(pg_data_t *pgdat,
				int (*calculate_pressure)(struct zone *));
#else /* CONFIG_SMP */

/*
 * We do not maintain differentials in a single processor configuration.
 * The functions directly modify the zone and global counters.
 */
static inline void __mod_zone_page_state(struct zone *zone,
			enum zone_stat_item item, int delta)
{
	zone_page_state_add(delta, zone, item);
}

static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
{
	atomic_long_inc(&zone->vm_stat[item]);
	atomic_long_inc(&vm_stat[item]);
}

static inline void __inc_zone_page_state(struct page *page,
			enum zone_stat_item item)
{
	__inc_zone_state(page_zone(page), item);
}

static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
{
	atomic_long_dec(&zone->vm_stat[item]);
	atomic_long_dec(&vm_stat[item]);
}

static inline void __dec_zone_page_state(struct page *page,
			enum zone_stat_item item)
{
	__dec_zone_state(page_zone(page), item);
}

/*
 * We only use atomic operations to update counters, so there is no need
 * to disable interrupts.
 */
#define inc_zone_page_state __inc_zone_page_state
#define dec_zone_page_state __dec_zone_page_state
#define mod_zone_page_state __mod_zone_page_state
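
/*
 * Consequently, on UP a call such as (sketch)
 *
 *	inc_zone_page_state(page, NR_FILE_PAGES);
 *
 * is simply __inc_zone_page_state() and ends up in the two
 * atomic_long_inc() calls above, with no irq masking anywhere.
 */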

#define set_pgdat_percpu_threshold(pgdat, callback) { }

static inline void refresh_cpu_vm_stats(int cpu) { }
#endif

#endif /* _LINUX_VMSTAT_H */