// SPDX-License-Identifier: GPL-2.0-only
/*
 * Generic show_mem() implementation
 *
 * Copyright (C) 2008 Johannes Weiner <hannes@saeurebad.de>
 */

#include <linux/blkdev.h>
#include <linux/cma.h>
#include <linux/cpuset.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/swap.h>
#include <linux/vmstat.h>

#include "internal.h"
#include "swap.h"

atomic_long_t _totalram_pages __read_mostly;
EXPORT_SYMBOL(_totalram_pages);
unsigned long totalreserve_pages __read_mostly;
unsigned long totalcma_pages __read_mostly;

static inline void show_node(struct zone *zone)
{
	if (IS_ENABLED(CONFIG_NUMA))
		printk("Node %d ", zone_to_nid(zone));
}
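
/*
 * Estimate how much memory is available for a new workload without causing
 * the system to swap or hit the OOM killer; this is what /proc/meminfo
 * reports as "MemAvailable".
 */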
long si_mem_available(void)
{
	long available;
	unsigned long pagecache;
	unsigned long wmark_low = 0;
	unsigned long pages[NR_LRU_LISTS];
	unsigned long reclaimable;
	struct zone *zone;
	int lru;

	for (lru = LRU_BASE; lru < NR_LRU_LISTS; lru++)
		pages[lru] = global_node_page_state(NR_LRU_BASE + lru);

	for_each_zone(zone)
		wmark_low += low_wmark_pages(zone);

	/*
	 * Estimate the amount of memory available for userspace allocations,
	 * without causing swapping or OOM.
	 */
	available = global_zone_page_state(NR_FREE_PAGES) - totalreserve_pages;

	/*
	 * Not all the page cache can be freed, otherwise the system will
	 * start swapping or thrashing. Assume at least half of the page
	 * cache, or the low watermark worth of cache, needs to stay.
	 */
	pagecache = pages[LRU_ACTIVE_FILE] + pages[LRU_INACTIVE_FILE];
	pagecache -= min(pagecache / 2, wmark_low);
	available += pagecache;

	/*
	 * Part of the reclaimable slab and other kernel memory consists of
	 * items that are in use, and cannot be freed. Cap this estimate at the
	 * low watermark.
	 */
	reclaimable = global_node_page_state_pages(NR_SLAB_RECLAIMABLE_B) +
		global_node_page_state(NR_KERNEL_MISC_RECLAIMABLE);
	available += reclaimable - min(reclaimable / 2, wmark_low);

	if (available < 0)
		available = 0;
	return available;
}
EXPORT_SYMBOL_GPL(si_mem_available);
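
/*
 * Fill the page-granular fields of struct sysinfo, as consumed by the
 * sysinfo(2) syscall; mem_unit tells the caller how many bytes one
 * reported unit represents (one page here).
 */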
void si_meminfo(struct sysinfo *val)
{
	val->totalram = totalram_pages();
	val->sharedram = global_node_page_state(NR_SHMEM);
	val->freeram = global_zone_page_state(NR_FREE_PAGES);
	val->bufferram = nr_blockdev_pages();
	val->totalhigh = totalhigh_pages();
	val->freehigh = nr_free_highpages();
	val->mem_unit = PAGE_SIZE;
}

EXPORT_SYMBOL(si_meminfo);
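
/*
 * Per-node variant of si_meminfo(): totals are summed over the node's
 * zones, counting managed pages (pages under buddy allocator control).
 */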
#ifdef CONFIG_NUMA
void si_meminfo_node(struct sysinfo *val, int nid)
{
	int zone_type;		/* needs to be signed */
	unsigned long managed_pages = 0;
	unsigned long managed_highpages = 0;
	unsigned long free_highpages = 0;
	pg_data_t *pgdat = NODE_DATA(nid);

	for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++)
		managed_pages += zone_managed_pages(&pgdat->node_zones[zone_type]);
	val->totalram = managed_pages;
	val->sharedram = node_page_state(pgdat, NR_SHMEM);
	val->freeram = sum_zone_node_page_state(nid, NR_FREE_PAGES);
#ifdef CONFIG_HIGHMEM
	for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++) {
		struct zone *zone = &pgdat->node_zones[zone_type];

		if (is_highmem(zone)) {
			managed_highpages += zone_managed_pages(zone);
			free_highpages += zone_page_state(zone, NR_FREE_PAGES);
		}
	}
	val->totalhigh = managed_highpages;
	val->freehigh = free_highpages;
#else
	val->totalhigh = managed_highpages;
	val->freehigh = free_highpages;
#endif
	val->mem_unit = PAGE_SIZE;
}
#endif

/*
 * Determine whether the node should be displayed or not, depending on whether
 * SHOW_MEM_FILTER_NODES was passed to show_free_areas().
 */
static bool show_mem_node_skip(unsigned int flags, int nid, nodemask_t *nodemask)
{
	if (!(flags & SHOW_MEM_FILTER_NODES))
		return false;

	/*
	 * no node mask - aka implicit memory numa policy. Do not bother with
	 * the synchronization - read_mems_allowed_begin - because we do not
	 * have to be precise here.
	 */
	if (!nodemask)
		nodemask = &cpuset_current_mems_allowed;

	return !node_isset(nid, *nodemask);
}
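
/*
 * Translate a bitmask of migratetypes into the one-letter legend used by
 * the per-order buddy dump below: U=unmovable, M=movable, E=reclaimable,
 * H=highatomic, C=CMA, I=isolate.
 */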
static void show_migration_types(unsigned char type)
{
	static const char types[MIGRATE_TYPES] = {
		[MIGRATE_UNMOVABLE]	= 'U',
		[MIGRATE_MOVABLE]	= 'M',
		[MIGRATE_RECLAIMABLE]	= 'E',
		[MIGRATE_HIGHATOMIC]	= 'H',
#ifdef CONFIG_CMA
		[MIGRATE_CMA]		= 'C',
#endif
#ifdef CONFIG_MEMORY_ISOLATION
		[MIGRATE_ISOLATE]	= 'I',
#endif
	};
	char tmp[MIGRATE_TYPES + 1];
	char *p = tmp;
	int i;

	for (i = 0; i < MIGRATE_TYPES; i++) {
		if (type & (1 << i))
			*p++ = types[i];
	}

	*p = '\0';
	printk(KERN_CONT "(%s) ", tmp);
}
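
/*
 * A node whose zones of interest have no managed pages has nothing worth
 * printing; callers use this to skip such nodes entirely.
 */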
static bool node_has_managed_zones(pg_data_t *pgdat, int max_zone_idx)
{
	int zone_idx;
	for (zone_idx = 0; zone_idx <= max_zone_idx; zone_idx++)
		if (zone_managed_pages(pgdat->node_zones + zone_idx))
			return true;
	return false;
}

/*
 * Show free area list (used inside shift_scroll-lock stuff)
 * We also calculate the percentage fragmentation. We do this by counting the
 * memory on each free list with the exception of the first item on the list.
 *
 * Bits in @filter:
 * SHOW_MEM_FILTER_NODES: suppress nodes that are not allowed by current's
 *   cpuset.
 */
void __show_free_areas(unsigned int filter, nodemask_t *nodemask, int max_zone_idx)
{
	unsigned long free_pcp = 0;
	int cpu, nid;
	struct zone *zone;
	pg_data_t *pgdat;

	for_each_populated_zone(zone) {
		if (zone_idx(zone) > max_zone_idx)
			continue;
		if (show_mem_node_skip(filter, zone_to_nid(zone), nodemask))
			continue;

		for_each_online_cpu(cpu)
			free_pcp += per_cpu_ptr(zone->per_cpu_pageset, cpu)->count;
	}
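
	/*
	 * free_pcp now holds the free pages still sitting on per-CPU page
	 * lists of the zones of interest, i.e. free memory not yet returned
	 * to the buddy freelists.
	 */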

	printk("active_anon:%lu inactive_anon:%lu isolated_anon:%lu\n"
		" active_file:%lu inactive_file:%lu isolated_file:%lu\n"
		" unevictable:%lu dirty:%lu writeback:%lu\n"
		" slab_reclaimable:%lu slab_unreclaimable:%lu\n"
		" mapped:%lu shmem:%lu pagetables:%lu\n"
		" sec_pagetables:%lu bounce:%lu\n"
		" kernel_misc_reclaimable:%lu\n"
		" free:%lu free_pcp:%lu free_cma:%lu\n",
		global_node_page_state(NR_ACTIVE_ANON),
		global_node_page_state(NR_INACTIVE_ANON),
		global_node_page_state(NR_ISOLATED_ANON),
		global_node_page_state(NR_ACTIVE_FILE),
		global_node_page_state(NR_INACTIVE_FILE),
		global_node_page_state(NR_ISOLATED_FILE),
		global_node_page_state(NR_UNEVICTABLE),
		global_node_page_state(NR_FILE_DIRTY),
		global_node_page_state(NR_WRITEBACK),
		global_node_page_state_pages(NR_SLAB_RECLAIMABLE_B),
		global_node_page_state_pages(NR_SLAB_UNRECLAIMABLE_B),
		global_node_page_state(NR_FILE_MAPPED),
		global_node_page_state(NR_SHMEM),
		global_node_page_state(NR_PAGETABLE),
		global_node_page_state(NR_SECONDARY_PAGETABLE),
		global_zone_page_state(NR_BOUNCE),
		global_node_page_state(NR_KERNEL_MISC_RECLAIMABLE),
		global_zone_page_state(NR_FREE_PAGES),
		free_pcp,
		global_zone_page_state(NR_FREE_CMA_PAGES));

	for_each_online_pgdat(pgdat) {
		if (show_mem_node_skip(filter, pgdat->node_id, nodemask))
			continue;
		if (!node_has_managed_zones(pgdat, max_zone_idx))
			continue;

		printk("Node %d"
			" active_anon:%lukB"
			" inactive_anon:%lukB"
			" active_file:%lukB"
			" inactive_file:%lukB"
			" unevictable:%lukB"
			" isolated(anon):%lukB"
			" isolated(file):%lukB"
			" mapped:%lukB"
			" dirty:%lukB"
			" writeback:%lukB"
			" shmem:%lukB"
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
			" shmem_thp: %lukB"
			" shmem_pmdmapped: %lukB"
			" anon_thp: %lukB"
#endif
			" writeback_tmp:%lukB"
			" kernel_stack:%lukB"
#ifdef CONFIG_SHADOW_CALL_STACK
			" shadow_call_stack:%lukB"
#endif
			" pagetables:%lukB"
			" sec_pagetables:%lukB"
			" all_unreclaimable? %s"
			"\n",
			pgdat->node_id,
			K(node_page_state(pgdat, NR_ACTIVE_ANON)),
			K(node_page_state(pgdat, NR_INACTIVE_ANON)),
			K(node_page_state(pgdat, NR_ACTIVE_FILE)),
			K(node_page_state(pgdat, NR_INACTIVE_FILE)),
			K(node_page_state(pgdat, NR_UNEVICTABLE)),
			K(node_page_state(pgdat, NR_ISOLATED_ANON)),
			K(node_page_state(pgdat, NR_ISOLATED_FILE)),
			K(node_page_state(pgdat, NR_FILE_MAPPED)),
			K(node_page_state(pgdat, NR_FILE_DIRTY)),
			K(node_page_state(pgdat, NR_WRITEBACK)),
			K(node_page_state(pgdat, NR_SHMEM)),
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
			K(node_page_state(pgdat, NR_SHMEM_THPS)),
			K(node_page_state(pgdat, NR_SHMEM_PMDMAPPED)),
			K(node_page_state(pgdat, NR_ANON_THPS)),
#endif
			K(node_page_state(pgdat, NR_WRITEBACK_TEMP)),
			node_page_state(pgdat, NR_KERNEL_STACK_KB),
#ifdef CONFIG_SHADOW_CALL_STACK
			node_page_state(pgdat, NR_KERNEL_SCS_KB),
#endif
			K(node_page_state(pgdat, NR_PAGETABLE)),
			K(node_page_state(pgdat, NR_SECONDARY_PAGETABLE)),
			pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES ?
				"yes" : "no");
	}

	for_each_populated_zone(zone) {
		int i;

		if (zone_idx(zone) > max_zone_idx)
			continue;
		if (show_mem_node_skip(filter, zone_to_nid(zone), nodemask))
			continue;

		free_pcp = 0;
		for_each_online_cpu(cpu)
			free_pcp += per_cpu_ptr(zone->per_cpu_pageset, cpu)->count;
315 " reserved_highatomic:%luKB"
317 " inactive_anon:%lukB"
319 " inactive_file:%lukB"
321 " writepending:%lukB"
331 K(zone_page_state(zone, NR_FREE_PAGES)),
332 K(zone->watermark_boost),
333 K(min_wmark_pages(zone)),
334 K(low_wmark_pages(zone)),
335 K(high_wmark_pages(zone)),
336 K(zone->nr_reserved_highatomic),
337 K(zone_page_state(zone, NR_ZONE_ACTIVE_ANON)),
338 K(zone_page_state(zone, NR_ZONE_INACTIVE_ANON)),
339 K(zone_page_state(zone, NR_ZONE_ACTIVE_FILE)),
340 K(zone_page_state(zone, NR_ZONE_INACTIVE_FILE)),
341 K(zone_page_state(zone, NR_ZONE_UNEVICTABLE)),
342 K(zone_page_state(zone, NR_ZONE_WRITE_PENDING)),
343 K(zone->present_pages),
344 K(zone_managed_pages(zone)),
345 K(zone_page_state(zone, NR_MLOCK)),
346 K(zone_page_state(zone, NR_BOUNCE)),
348 K(this_cpu_read(zone->per_cpu_pageset->count)),
349 K(zone_page_state(zone, NR_FREE_CMA_PAGES)));
		printk("lowmem_reserve[]:");
		for (i = 0; i < MAX_NR_ZONES; i++)
			printk(KERN_CONT " %ld", zone->lowmem_reserve[i]);
		printk(KERN_CONT "\n");
	}
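
	/*
	 * Per-order buddy dump: for every zone of interest, show how many
	 * free blocks exist at each order, and which migratetypes still have
	 * free blocks there (see show_migration_types() for the legend).
	 */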
	for_each_populated_zone(zone) {
		unsigned int order;
		unsigned long nr[MAX_ORDER + 1], flags, total = 0;
		unsigned char types[MAX_ORDER + 1];

		if (zone_idx(zone) > max_zone_idx)
			continue;
		if (show_mem_node_skip(filter, zone_to_nid(zone), nodemask))
			continue;
		show_node(zone);
		printk(KERN_CONT "%s: ", zone->name);

		spin_lock_irqsave(&zone->lock, flags);
		for (order = 0; order <= MAX_ORDER; order++) {
			struct free_area *area = &zone->free_area[order];
			int type;

			nr[order] = area->nr_free;
			total += nr[order] << order;

			types[order] = 0;
			for (type = 0; type < MIGRATE_TYPES; type++) {
				if (!free_area_empty(area, type))
					types[order] |= 1 << type;
			}
		}
		spin_unlock_irqrestore(&zone->lock, flags);
		for (order = 0; order <= MAX_ORDER; order++) {
			printk(KERN_CONT "%lu*%lukB ",
			       nr[order], K(1UL) << order);
			if (nr[order])
				show_migration_types(types[order]);
		}
		printk(KERN_CONT "= %lukB\n", K(total));
	}
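
	/*
	 * The resulting line looks like, e.g.:
	 *   Normal: 40*4kB (UME) 12*8kB (UM) 3*16kB (M) 0*32kB = 304kB
	 * i.e. <count>*<block size> per order, followed by the migratetypes
	 * that still have free blocks at that order.
	 */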

	for_each_online_node(nid) {
		if (show_mem_node_skip(filter, nid, nodemask))
			continue;
		hugetlb_show_meminfo_node(nid);
	}

	printk("%ld total pagecache pages\n", global_node_page_state(NR_FILE_PAGES));

	show_swap_cache_info();
}
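
/*
 * Back end of show_mem(): dump the free-area details above plus a short
 * summary of RAM, reserved, highmem and (where configured) CMA and
 * hwpoisoned page counts. Typically seen in OOM and allocation-failure
 * reports.
 */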
void __show_mem(unsigned int filter, nodemask_t *nodemask, int max_zone_idx)
{
	unsigned long total = 0, reserved = 0, highmem = 0;
	struct zone *zone;

	printk("Mem-Info:\n");
	__show_free_areas(filter, nodemask, max_zone_idx);

	for_each_populated_zone(zone) {
		total += zone->present_pages;
		reserved += zone->present_pages - zone_managed_pages(zone);

		if (is_highmem(zone))
			highmem += zone->present_pages;
	}
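
	/*
	 * "reserved" is present-but-unmanaged memory: pages that exist in
	 * the zone but were never given to the buddy allocator (e.g. the
	 * memmap and other boot-time allocations).
	 */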

	printk("%lu pages RAM\n", total);
	printk("%lu pages HighMem/MovableOnly\n", highmem);
	printk("%lu pages reserved\n", reserved);
#ifdef CONFIG_CMA
	printk("%lu pages cma reserved\n", totalcma_pages);
#endif
#ifdef CONFIG_MEMORY_FAILURE
	printk("%lu pages hwpoisoned\n", atomic_long_read(&num_poisoned_pages));
#endif
}