// SPDX-License-Identifier: GPL-2.0
#include <linux/memblock.h>
#include <linux/compiler.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/ksm.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/huge_mm.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/hugetlb.h>
#include <linux/memremap.h>
#include <linux/memcontrol.h>
#include <linux/mmu_notifier.h>
#include <linux/page_idle.h>
#include <linux/kernel-page-flags.h>
#include <linux/uaccess.h>
#include "internal.h"
#define KPMSIZE sizeof(u64)
#define KPMMASK (KPMSIZE - 1)
#define KPMBITS (KPMSIZE * BITS_PER_BYTE)
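/*
 * Each /proc/kpage* file below is a flat array of u64 entries indexed
 * by page frame number, so the file offset for a given pfn is simply
 * pfn * KPMSIZE (e.g. pfn 0x1000 lives at offset 0x8000).
 */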
static inline unsigned long get_max_dump_pfn(void)
{
#ifdef CONFIG_SPARSEMEM
	/*
	 * The memmap of early sections is completely populated and marked
	 * online even if max_pfn does not fall on a section boundary -
	 * pfn_to_online_page() will succeed on all pages. Allow inspecting
	 * these memmaps.
	 */
	return round_up(max_pfn, PAGES_PER_SECTION);
#else
	return max_pfn;
#endif
}
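/*
 * Worked example (values are arch/config dependent): with 4 KiB pages
 * and 128 MiB sections, PAGES_PER_SECTION is 32768 (0x8000), so a
 * max_pfn of 0x23f000 is rounded up to 0x240000.
 */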
/* /proc/kpagecount - an array exposing page counts
 *
 * Each entry is a u64 representing the corresponding
 * physical page count.
 */
static ssize_t kpagecount_read(struct file *file, char __user *buf,
			       size_t count, loff_t *ppos)
{
	const unsigned long max_dump_pfn = get_max_dump_pfn();
	u64 __user *out = (u64 __user *)buf;
	struct page *ppage;
	unsigned long src = *ppos;
	unsigned long pfn;
	ssize_t ret = 0;
	u64 pcount;

	pfn = src / KPMSIZE;
	if (src & KPMMASK || count & KPMMASK)
		return -EINVAL;
	if (src >= max_dump_pfn * KPMSIZE)
		return 0;
	count = min_t(unsigned long, count, (max_dump_pfn * KPMSIZE) - src);

	while (count > 0) {
		/*
		 * TODO: ZONE_DEVICE support requires to identify
		 * memmaps that were actually initialized.
		 */
		ppage = pfn_to_online_page(pfn);

		/* Slab pages and typed pages overload _mapcount. */
		if (!ppage || PageSlab(ppage) || page_has_type(ppage))
			pcount = 0;
		else
			pcount = page_mapcount(ppage);

		if (put_user(pcount, out)) {
			ret = -EFAULT;
			break;
		}

		pfn++;
		out++;
		count -= KPMSIZE;

		cond_resched();
	}

	*ppos += (char __user *)out - buf;
	if (!ret)
		ret = (char __user *)out - buf;

	return ret;
}
static const struct proc_ops kpagecount_proc_ops = {
	.proc_flags = PROC_ENTRY_PERMANENT,
	.proc_lseek = mem_lseek,
	.proc_read = kpagecount_read,
};
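/*
 * Example (userspace, not part of this file): a minimal sketch of how a
 * tool might read one entry; assumes root, a runtime "pfn" variable and
 * trimmed error handling.
 *
 *	int fd = open("/proc/kpagecount", O_RDONLY);
 *	uint64_t pcount;
 *	if (pread(fd, &pcount, sizeof(pcount), (off_t)pfn * 8) == sizeof(pcount))
 *		printf("pfn %lu is mapped %llu time(s)\n",
 *		       pfn, (unsigned long long)pcount);
 */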
/* /proc/kpageflags - an array exposing page flags
 *
 * Each entry is a u64 representing the corresponding
 * physical page flags.
 */
static inline u64 kpf_copy_bit(u64 kflags, int ubit, int kbit)
{
	return ((kflags >> kbit) & 1) << ubit;
}
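/*
 * For example, kpf_copy_bit(k, KPF_DIRTY, PG_dirty) extracts bit
 * PG_dirty from the raw kernel flags word and re-emits it at the
 * stable, user-visible bit position KPF_DIRTY.
 */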
u64 stable_page_flags(struct page *page)
{
	u64 k;
	u64 u;

	/*
	 * pseudo flag: KPF_NOPAGE
	 * it differentiates a memory hole from a page with no flags
	 */
	if (!page)
		return 1 << KPF_NOPAGE;

	k = page->flags;
	u = 0;

	/*
	 * pseudo flags for the well known (anonymous) memory mapped pages
	 *
	 * Note that page->_mapcount is overloaded in SLOB/SLUB/SLQB, so the
	 * simple test in page_mapped() is not enough.
	 */
	if (!PageSlab(page) && page_mapped(page))
		u |= 1 << KPF_MMAP;
	if (PageAnon(page))
		u |= 1 << KPF_ANON;
	if (PageKsm(page))
		u |= 1 << KPF_KSM;
	/*
	 * compound pages: export both head/tail info
	 * they together define a compound page's start/end pos and order
	 */
	if (PageHead(page))
		u |= 1 << KPF_COMPOUND_HEAD;
	if (PageTail(page))
		u |= 1 << KPF_COMPOUND_TAIL;
	if (PageHuge(page))
		u |= 1 << KPF_HUGE;
	/*
	 * PageTransCompound can be true for non-huge compound pages (slab
	 * pages or pages allocated by drivers with __GFP_COMP) because it
	 * just checks PG_head/PG_tail, so we need to check PageLRU/PageAnon
	 * to make sure a given page is a thp, not a non-huge compound page.
	 */
	else if (PageTransCompound(page)) {
		struct page *head = compound_head(page);

		if (PageLRU(head) || PageAnon(head))
			u |= 1 << KPF_THP;
		else if (is_huge_zero_page(head)) {
			u |= 1 << KPF_ZERO_PAGE;
			u |= 1 << KPF_THP;
		}
	} else if (is_zero_pfn(page_to_pfn(page)))
		u |= 1 << KPF_ZERO_PAGE;
	/*
	 * Caveats on high order pages: page->_refcount will only be set
	 * -1 on the head page; SLUB/SLQB do the same for PG_slab;
	 * SLOB won't set PG_slab at all on compound pages.
	 */
	if (PageBuddy(page))
		u |= 1 << KPF_BUDDY;
	else if (page_count(page) == 0 && is_free_buddy_page(page))
		u |= 1 << KPF_BUDDY;

	if (PageOffline(page))
		u |= 1 << KPF_OFFLINE;
	if (PageTable(page))
		u |= 1 << KPF_PGTABLE;

	if (page_is_idle(page))
		u |= 1 << KPF_IDLE;
	u |= kpf_copy_bit(k, KPF_LOCKED, PG_locked);

	u |= kpf_copy_bit(k, KPF_SLAB, PG_slab);
	if (PageTail(page) && PageSlab(compound_head(page)))
		u |= 1 << KPF_SLAB;

	u |= kpf_copy_bit(k, KPF_ERROR, PG_error);
	u |= kpf_copy_bit(k, KPF_DIRTY, PG_dirty);
	u |= kpf_copy_bit(k, KPF_UPTODATE, PG_uptodate);
	u |= kpf_copy_bit(k, KPF_WRITEBACK, PG_writeback);

	u |= kpf_copy_bit(k, KPF_LRU, PG_lru);
	u |= kpf_copy_bit(k, KPF_REFERENCED, PG_referenced);
	u |= kpf_copy_bit(k, KPF_ACTIVE, PG_active);
	u |= kpf_copy_bit(k, KPF_RECLAIM, PG_reclaim);

	if (PageSwapCache(page))
		u |= 1 << KPF_SWAPCACHE;
	u |= kpf_copy_bit(k, KPF_SWAPBACKED, PG_swapbacked);

	u |= kpf_copy_bit(k, KPF_UNEVICTABLE, PG_unevictable);
	u |= kpf_copy_bit(k, KPF_MLOCKED, PG_mlocked);
#ifdef CONFIG_MEMORY_FAILURE
	u |= kpf_copy_bit(k, KPF_HWPOISON, PG_hwpoison);
#endif

#ifdef CONFIG_ARCH_USES_PG_UNCACHED
	u |= kpf_copy_bit(k, KPF_UNCACHED, PG_uncached);
#endif

	u |= kpf_copy_bit(k, KPF_RESERVED, PG_reserved);
	u |= kpf_copy_bit(k, KPF_MAPPEDTODISK, PG_mappedtodisk);
	u |= kpf_copy_bit(k, KPF_PRIVATE, PG_private);
	u |= kpf_copy_bit(k, KPF_PRIVATE_2, PG_private_2);
	u |= kpf_copy_bit(k, KPF_OWNER_PRIVATE, PG_owner_priv_1);
	u |= kpf_copy_bit(k, KPF_ARCH, PG_arch_1);
#ifdef CONFIG_64BIT
	u |= kpf_copy_bit(k, KPF_ARCH_2, PG_arch_2);
#endif

	return u;
}
static ssize_t kpageflags_read(struct file *file, char __user *buf,
			       size_t count, loff_t *ppos)
{
	const unsigned long max_dump_pfn = get_max_dump_pfn();
	u64 __user *out = (u64 __user *)buf;
	struct page *ppage;
	unsigned long src = *ppos;
	unsigned long pfn;
	ssize_t ret = 0;

	pfn = src / KPMSIZE;
	if (src & KPMMASK || count & KPMMASK)
		return -EINVAL;
	if (src >= max_dump_pfn * KPMSIZE)
		return 0;
	count = min_t(unsigned long, count, (max_dump_pfn * KPMSIZE) - src);

	while (count > 0) {
		/*
		 * TODO: ZONE_DEVICE support requires to identify
		 * memmaps that were actually initialized.
		 */
		ppage = pfn_to_online_page(pfn);

		if (put_user(stable_page_flags(ppage), out)) {
			ret = -EFAULT;
			break;
		}

		pfn++;
		out++;
		count -= KPMSIZE;

		cond_resched();
	}

	*ppos += (char __user *)out - buf;
	if (!ret)
		ret = (char __user *)out - buf;

	return ret;
}
static const struct proc_ops kpageflags_proc_ops = {
	.proc_flags = PROC_ENTRY_PERMANENT,
	.proc_lseek = mem_lseek,
	.proc_read = kpageflags_read,
};
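/*
 * Example (userspace, not part of this file): a hedged sketch of
 * testing one flag for a pfn; the KPF_* bit numbers come from the uapi
 * header <linux/kernel-page-flags.h>.
 *
 *	uint64_t flags;
 *	int fd = open("/proc/kpageflags", O_RDONLY);
 *	if (pread(fd, &flags, sizeof(flags), (off_t)pfn * 8) == sizeof(flags) &&
 *	    (flags & (1ULL << KPF_THP)))
 *		printf("pfn %lu is part of a transparent huge page\n", pfn);
 */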
#ifdef CONFIG_MEMCG
static ssize_t kpagecgroup_read(struct file *file, char __user *buf,
				size_t count, loff_t *ppos)
{
	const unsigned long max_dump_pfn = get_max_dump_pfn();
	u64 __user *out = (u64 __user *)buf;
	struct page *ppage;
	unsigned long src = *ppos;
	unsigned long pfn;
	ssize_t ret = 0;
	u64 ino;

	pfn = src / KPMSIZE;
	if (src & KPMMASK || count & KPMMASK)
		return -EINVAL;
	if (src >= max_dump_pfn * KPMSIZE)
		return 0;
	count = min_t(unsigned long, count, (max_dump_pfn * KPMSIZE) - src);

	while (count > 0) {
		/*
		 * TODO: ZONE_DEVICE support requires to identify
		 * memmaps that were actually initialized.
		 */
		ppage = pfn_to_online_page(pfn);

		if (ppage)
			ino = page_cgroup_ino(ppage);
		else
			ino = 0;

		if (put_user(ino, out)) {
			ret = -EFAULT;
			break;
		}

		pfn++;
		out++;
		count -= KPMSIZE;

		cond_resched();
	}

	*ppos += (char __user *)out - buf;
	if (!ret)
		ret = (char __user *)out - buf;

	return ret;
}
static const struct proc_ops kpagecgroup_proc_ops = {
	.proc_flags = PROC_ENTRY_PERMANENT,
	.proc_lseek = mem_lseek,
	.proc_read = kpagecgroup_read,
};
#endif /* CONFIG_MEMCG */
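/*
 * Example (userspace, not part of this file): each entry is the inode
 * number of the memory cgroup owning the page, so it can be matched
 * against st_ino from stat() on a cgroupfs directory. A rough sketch,
 * with "/sys/fs/cgroup/mygroup" as a hypothetical cgroup path:
 *
 *	uint64_t ino;
 *	struct stat st;
 *	pread(kpagecgroup_fd, &ino, sizeof(ino), (off_t)pfn * 8);
 *	if (stat("/sys/fs/cgroup/mygroup", &st) == 0 && st.st_ino == ino)
 *		printf("pfn %lu is charged to mygroup\n", pfn);
 */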
static int __init proc_page_init(void)
{
	proc_create("kpagecount", S_IRUSR, NULL, &kpagecount_proc_ops);
	proc_create("kpageflags", S_IRUSR, NULL, &kpageflags_proc_ops);
#ifdef CONFIG_MEMCG
	proc_create("kpagecgroup", S_IRUSR, NULL, &kpagecgroup_proc_ops);
#endif
	return 0;
}
fs_initcall(proc_page_init);
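/*
 * Example (userspace, not part of this file): the usual pipeline is to
 * translate a virtual address to a pfn via /proc/<pid>/pagemap (bits
 * 0-54 hold the pfn when bit 63, "present", is set) and then index the
 * /proc/kpage* arrays with it. A hedged sketch:
 *
 *	uint64_t ent, flags;
 *	pread(pagemap_fd, &ent, sizeof(ent),
 *	      (off_t)(vaddr / page_size) * 8);
 *	if (ent & (1ULL << 63)) {
 *		uint64_t pfn = ent & ((1ULL << 55) - 1);
 *		pread(kpageflags_fd, &flags, sizeof(flags), (off_t)pfn * 8);
 *	}
 */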